Skip to content

Commit

Permalink
metatensor: scaffold the build system
Browse files Browse the repository at this point in the history
  • Loading branch information
Luthaf committed Mar 22, 2024
1 parent a237024 commit 56716fa
Show file tree
Hide file tree
Showing 9 changed files with 5,054 additions and 3,871 deletions.
8,697 changes: 4,826 additions & 3,871 deletions configure

Large diffs are not rendered by default.

17 changes: 17 additions & 0 deletions configure.ac
Original file line number Diff line number Diff line change
Expand Up @@ -324,6 +324,7 @@ PLUMED_CONFIG_ENABLE([af_ocl],[search for arrayfire_ocl],[no])
PLUMED_CONFIG_ENABLE([af_cuda],[search for arrayfire_cuda],[no])
PLUMED_CONFIG_ENABLE([af_cpu],[search for arrayfire_cpu],[no])
PLUMED_CONFIG_ENABLE([libtorch],[search for libtorch],[no]) #added by luigibonati
PLUMED_CONFIG_ENABLE([metatensor],[search for metatensor],[no])

AC_ARG_VAR(SOEXT,[extension of dynamic libraries (so/dylib)])
AC_ARG_VAR(STATIC_LIBS,[variables that should be linked statically directly to MD code - configure will add here -ldl if necessary ])
Expand Down Expand Up @@ -928,6 +929,11 @@ if test "$af_cpu" = true ; then
PLUMED_CHECK_PACKAGE([arrayfire.h],[af_is_double],[__PLUMED_HAS_ARRAYFIRE],[afcpu])
fi

# metatensor requires libtorch: force the libtorch checks below to run
# whenever --enable-metatensor was given
if test "$metatensor" = true ; then
    libtorch=true
fi

#added by luigibonati
if test $libtorch = true ; then
# disable as-needed in linking libraries (both static and shared)
Expand Down Expand Up @@ -965,6 +971,17 @@ if test $libtorch = true ; then
fi
fi

if test "$metatensor" = true ; then
    # find metatensor and metatensor_torch; the test program only needs the
    # metatensor_torch header, but both libraries must be linked
    PLUMED_CHECK_CXX_PACKAGE([metatensor],[
    #include <metatensor/torch.hpp>
    int main() {
        metatensor_torch::version();
        return 0;
    }
    ], [__PLUMED_HAS_METATENSOR], [metatensor metatensor_torch], [true])
fi

# in non-debug mode, add -DNDEBUG
if test "$debug" = false ; then
AC_MSG_NOTICE([Release mode, adding -DNDEBUG])
Expand Down
1 change: 1 addition & 0 deletions src/.gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,7 @@
!/s2cm
!/pytorch
!/membranefusion
!/metatensor

# And just ignore these files
*.xxd
Expand Down
12 changes: 12 additions & 0 deletions src/metatensor/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
/*
# in this directory, only accept sources, Makefile, README/README.md,
# module.type and COPYRIGHT
!/.gitignore
!/*.c
!/*.cpp
!/*.h
!/*.sh
!/Makefile
!/README
!/README.md
!/module.type
!/COPYRIGHT
4 changes: 4 additions & 0 deletions src/metatensor/Makefile
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# modules this module depends on (metatensor only needs the PLUMED core)
USE=core

#generic makefile
include ../maketools/make.module
68 changes: 68 additions & 0 deletions src/metatensor/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,68 @@
# Metatensor module for PLUMED


## Building the code

1. You'll need to first install libtorch, either by installing PyTorch itself
with Python, or by downloading the prebuilt C++ library from
https://pytorch.org/get-started/locally/.

```bash
# point this to the path where you extracted the C++ libtorch
TORCH_PREFIX=../../..
# if you used Python to install torch, you can do this:
TORCH_CMAKE_PREFIX=$(python -c "import torch; print(torch.utils.cmake_prefix_path)")
TORCH_PREFIX=$(cd "$TORCH_CMAKE_PREFIX/../.." && pwd)

# patch a bug from torch's MKL detection
cd <PLUMED/DIR>
./src/metatensor/patch-torch.sh "$TORCH_PREFIX"
```

2. a) build and install metatensor-torch from source. You'll need a rust
compiler on your system, the easiest way is by using https://rustup.rs/

```bash
cd <SOME/PLACE/WHERE/TO/PUT/METATENSOR/SOURCES>

# define a location where metatensor should be installed
METATENSOR_PREFIX=<...>

METATENSOR_TORCH_PREFIX="$METATENSOR_PREFIX"

git clone https://github.com/lab-cosmo/metatensor --branch metatensor-torch-v0.3.0
cd metatensor

mkdir build && cd build
cmake -DBUILD_SHARED_LIBS=ON \
-DCMAKE_INSTALL_PREFIX="$METATENSOR_PREFIX" \
-DCMAKE_PREFIX_PATH="$TORCH_PREFIX" \
-DBUILD_METATENSOR_TORCH=ON \
-DMETATENSOR_INSTALL_BOTH_STATIC_SHARED=OFF \
..

cmake --build . --target install --parallel
```

2. b) alternatively, use metatensor-torch from Python (`pip install metatensor[torch]`)

```bash
METATENSOR_CMAKE_PREFIX=$(python -c "import metatensor; print(metatensor.utils.cmake_prefix_path)")
METATENSOR_PREFIX=$(cd "$METATENSOR_CMAKE_PREFIX/../.." && pwd)

METATENSOR_TORCH_CMAKE_PREFIX=$(python -c "import metatensor.torch; print(metatensor.torch.utils.cmake_prefix_path)")
METATENSOR_TORCH_PREFIX=$(cd "$METATENSOR_TORCH_CMAKE_PREFIX/../.." && pwd)
```

3. build Plumed itself

```bash
cd <PLUMED/DIR>

# configure with metatensor
./configure --enable-libtorch --enable-metatensor --enable-modules=+metatensor \
LDFLAGS="-L$TORCH_PREFIX/lib -L$METATENSOR_PREFIX/lib -L$METATENSOR_TORCH_PREFIX/lib" \
CPPFLAGS="-I$TORCH_PREFIX/include -I$TORCH_PREFIX/include/torch/csrc/api/include -I$METATENSOR_PREFIX/include -I$METATENSOR_TORCH_PREFIX/include"

make -j && make install
```
80 changes: 80 additions & 0 deletions src/metatensor/metatensor.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,80 @@
/* ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Copyright (c) 2024 Guillaume Fraux
This module is free software: you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the Free
Software Foundation, either version 3 of the License, or (at your option)
any later version.
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */

#if !defined(__PLUMED_HAS_LIBTORCH) || !defined(__PLUMED_HAS_METATENSOR)

// give a nice error message if the user tries to enable
// metatensor without enabling the corresponding libraries
#error "can not compile the metatensor module without the corresponding libraries, either disable the metatensor module or configure with `--enable-metatensor --enable-libtorch` and make sure the libraries can be found"

#else

#include "core/ActionAtomistic.h"
#include "core/ActionWithValue.h"
#include "core/ActionRegister.h"
#include "core/PlumedMain.h"


#include <torch/script.h>
#include <metatensor/torch.hpp>


namespace PLMD {

// PLUMED action backed by the metatensor / metatensor_torch libraries.
//
// NOTE(review): this commit only scaffolds the build system — every member
// function below throws "unimplemented".
class MetatensorPlumedAction: public ActionAtomistic, public ActionWithValue {
public:
    // Declare the keywords this action accepts in the PLUMED input file
    static void registerKeywords(Keywords& keys);
    explicit MetatensorPlumedAction(const ActionOptions&);

    void calculate() override;
    void apply() override;
    unsigned getNumberOfDerivatives() override;

private:
    // output of the metatensor model (unused for now, see NOTE above)
    metatensor_torch::TorchTensorMap output_;
};

// make this action available under the METATENSOR keyword in PLUMED input
PLUMED_REGISTER_ACTION(MetatensorPlumedAction, "METATENSOR")

// Register the keywords of the parent action classes; no metatensor-specific
// keywords exist yet.
void MetatensorPlumedAction::registerKeywords(Keywords& keys) {
    Action::registerKeywords(keys);
    ActionAtomistic::registerKeywords(keys);
    ActionWithValue::registerKeywords(keys);

    // scaffolding only: fail loudly if this action is actually used
    throw std::runtime_error("unimplemented");
}

// Construct the action from the parsed PLUMED input.
// Scaffolding only: always throws for now.
MetatensorPlumedAction::MetatensorPlumedAction(const ActionOptions& options):
    Action(options),
    ActionAtomistic(options),
    ActionWithValue(options)
{
    throw std::runtime_error("unimplemented");
}

// Number of derivatives this action produces: the 9 components of the
// strain gradient plus one gradient per Cartesian coordinate of each atom.
unsigned MetatensorPlumedAction::getNumberOfDerivatives() {
    auto n_atoms = this->getNumberOfAtoms();
    return 9 + n_atoms * 3;
}


// Evaluate the action. Scaffolding only: not yet implemented.
void MetatensorPlumedAction::calculate() {
    throw std::runtime_error("unimplemented");
}


// Apply forces back to the atoms. Scaffolding only: not yet implemented.
void MetatensorPlumedAction::apply() {
    throw std::runtime_error("unimplemented");
}

} // namespace PLMD


#endif
1 change: 1 addition & 0 deletions src/metatensor/module.type
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
default-off
45 changes: 45 additions & 0 deletions src/metatensor/patch-torch.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
#!/usr/bin/env bash

# Patch libtorch's Caffe2 MKL detection code (mkl.cmake), pulling in the fix
# from https://github.com/pytorch/pytorch/pull/119945 until it is properly
# released.
#
# Usage: patch-torch.sh <TORCH_PREFIX>
#   TORCH_PREFIX: root of the libtorch installation to patch

set -eu

# give a readable usage error instead of set -u's "unbound variable"
if [ "$#" -ne 1 ]; then
    echo "usage: $0 <TORCH_PREFIX>" >&2
    exit 1
fi

TORCH_PREFIX=$1

# mkl.cmake lives in a different place depending on how torch was packaged
if [ -f "$TORCH_PREFIX/share/cmake/Caffe2/public/mkl.cmake" ]; then
    MKL_CMAKE="$TORCH_PREFIX/share/cmake/Caffe2/public/mkl.cmake"
elif [ -f "$TORCH_PREFIX/Caffe2/public/mkl.cmake" ]; then
    MKL_CMAKE="$TORCH_PREFIX/Caffe2/public/mkl.cmake"
else
    echo "Failed to find mkl.cmake in '$TORCH_PREFIX'" >&2
    exit 1
fi

# overwrite mkl.cmake with the fixed version; the backslash-escaped \$ keep
# the CMake variable references from being expanded by the shell
cat > "$MKL_CMAKE" << EOF
find_package(MKL QUIET)
if(TARGET caffe2::mkl)
    return()
endif()
add_library(caffe2::mkl INTERFACE IMPORTED)
target_include_directories(caffe2::mkl INTERFACE \${MKL_INCLUDE_DIR})
target_link_libraries(caffe2::mkl INTERFACE \${MKL_LIBRARIES})
foreach(MKL_LIB IN LISTS MKL_LIBRARIES)
  if(EXISTS "\${MKL_LIB}")
    get_filename_component(MKL_LINK_DIR "\${MKL_LIB}" DIRECTORY)
    if(IS_DIRECTORY "\${MKL_LINK_DIR}")
      target_link_directories(caffe2::mkl INTERFACE "\${MKL_LINK_DIR}")
    endif()
  endif()
endforeach()
# TODO: This is a hack, it will not pick up architecture dependent
# MKL libraries correctly; see https://github.com/pytorch/pytorch/issues/73008
set_property(
  TARGET caffe2::mkl PROPERTY INTERFACE_LINK_DIRECTORIES
  \${MKL_ROOT}/lib \${MKL_ROOT}/lib/intel64 \${MKL_ROOT}/lib/intel64_win \${MKL_ROOT}/lib/win-x64)
EOF

0 comments on commit 56716fa

Please sign in to comment.