Skip to content
This repository has been archived by the owner on Jan 24, 2024. It is now read-only.

Commit

Permalink
change rpath of shared_library in python wheel package (#625) (#630)
Browse files Browse the repository at this point in the history
  • Loading branch information
hp03 authored Dec 8, 2021
1 parent f3353ab commit fcde5e8
Show file tree
Hide file tree
Showing 3 changed files with 90 additions and 67 deletions.
11 changes: 8 additions & 3 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -10,12 +10,13 @@ set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
set(DOWNLOAD_MODEL_DIR "${THIRD_PARTY_PATH}/model")

option(WITH_TESTING "Compile with Unittests" OFF)
option(WITH_INFRT "Whether to build infrt" OFF)
option(WITH_MKL_CBLAS "Compile MKL with CBLAS support" ON)
option(WITH_MKLDNN "Compile MKLDNN support" ON)
option(WITH_CUDA "Compile with CUDA support" OFF)
option(WITH_CUDNN "Compile with CUDNN support" OFF)
option(WITH_DEBUG "Compile with debug information" OFF)
option(PUBLISH_LIBS "Whether to publish compiled libraries" OFF)
option(PUBLISH_LIBS "Whether to publish compiled libraries" ON)

set(PY_VERSION CACHE STRING FORCE)
if (NOT PY_VERSION)
Expand Down Expand Up @@ -87,13 +88,15 @@ set(global_test_args "--cinn_x86_builtin_code_root=${CMAKE_SOURCE_DIR}/cinn/back
set(core_deps CACHE INTERNAL "" FORCE)
set(hlir_src CACHE INTERNAL "" FORCE)
add_subdirectory(cinn)
if (WITH_INFRT)
add_subdirectory(infrt)
endif()
add_subdirectory(tests)
add_subdirectory(tutorials)

set(core_src "${cinnapi_src}")

cc_library(cinnapi SHARED SRCS ${cinnapi_src} DEPS glog ${llvm_libs} framework_proto param_proto paddle_framework_proto absl isl ginac)
cc_library(cinnapi SHARED SRCS ${cinnapi_src} DEPS glog ${llvm_libs} framework_proto param_proto framework_proto absl isl ginac)
add_dependencies(cinnapi GEN_LLVM_RUNTIME_IR_HEADER ZLIB::ZLIB)
add_dependencies(cinnapi GEN_LLVM_RUNTIME_IR_HEADER ${core_deps})
if (WITH_MKL_CBLAS)
Expand All @@ -114,7 +117,7 @@ function(gen_cinncore LINKTYPE)
if (${LINKTYPE} STREQUAL "STATIC")
set(CINNCORE_TARGET cinncore_static)
endif()
cc_library(${CINNCORE_TARGET} ${LINKTYPE} SRCS ${core_src} DEPS glog ${llvm_libs} framework_proto param_proto paddle_framework_proto absl isl ginac)
cc_library(${CINNCORE_TARGET} ${LINKTYPE} SRCS ${core_src} DEPS glog ${llvm_libs} framework_proto param_proto framework_proto absl isl ginac)
add_dependencies(${CINNCORE_TARGET} GEN_LLVM_RUNTIME_IR_HEADER ZLIB::ZLIB)
add_dependencies(${CINNCORE_TARGET} GEN_LLVM_RUNTIME_IR_HEADER ${core_deps})
if (WITH_MKL_CBLAS)
Expand All @@ -135,6 +138,7 @@ gen_cinncore(STATIC)
gen_cinncore(SHARED)

# MLIR td file generations
if (WITH_INFRT)
set(infrt_mlir_incs
ops_inc
basic_kernels_inc
Expand All @@ -148,6 +152,7 @@ set(infrt_mlir_incs

cc_library(infrt SRCS ${infrt_src} DEPS glog absl paddle_framework_proto ${mlir_libs})
add_dependencies(infrt ${infrt_mlir_incs})
endif ()

# --------distribute cinncore lib and include begin--------
if (PUBLISH_LIBS)
Expand Down
129 changes: 72 additions & 57 deletions docs/source/install.md
Original file line number Diff line number Diff line change
@@ -1,79 +1,94 @@
# Build from source code

## Dependencies
CINN is built and tested on Ubuntu-18.04 with GCC 8.2.0; third party libraries are provided for that environment and will be downloaded automatically. Other compatible environments should work, but we cannot guarantee it. Currently, CINN is under very active development, so we provide a Docker environment for you to get a quick experience. If you have any problem building CINN in your own environment, please try using Docker. More portability will be added to CINN in the future.

- gcc-8
- g++-8
- isl 0.22
Docker image we used to build and test CINN: `registry.baidubce.com/paddlepaddle/paddle:latest-dev-cuda11.2-cudnn8-gcc82`.

### Install isl
## Build without Docker
Building without Docker is not recommended for now. Third party dependencies are downloaded automatically by cmake; some libraries will be compiled, and others are static prebuilt. If you are still interested in building CINN in your own environment, you can use the content of the `Build using Docker` section as a reference.

## Build using Docker

```sh
git clone https://github.com/Meinersbur/isl.git
git reset --hard isl-0.22
cd isl
./configure --with-clang=system
make -j
make install
```

### compile
Checkout CINN source code from git.

```sh
cd CINN
cp cmake/config.cmake <build_dir>/
```bash
$ git clone --depth 1 https://github.com/PaddlePaddle/CINN.git
```

Modify the `config.cmake`, change the `ISL_HOME` to the path isl installed.

Download docker image from registry.

### Install LLVM and MLIR
To use the latest version of MLIR, the latest llvm-project should be compiled and installed.

The git commit is `f9dc2b7079350d0fed3bb3775f496b90483c9e42`
```bash
$ docker pull registry.baidubce.com/paddlepaddle/paddle:latest-dev-cuda11.2-cudnn8-gcc82
```

*download llvm source code*
Start the container.

```sh
git clone https://github.com/llvm/llvm-project.git
```bash
$ docker run --gpus=all -it -v $PWD/CINN:/CINN registry.baidubce.com/paddlepaddle/paddle:latest-dev-cuda11.2-cudnn8-gcc82 /bin/bash
```
The git of the llvm-project is huge and git cloning in China is quite slow, use a http proxy if necessary.

*compile and install to local directory*

```sh
cd llvm-project
mkdir build && cd build

cmake -G Ninja ../llvm \
-DLLVM_ENABLE_PROJECTS=mlir \
-DLLVM_BUILD_EXAMPLES=OFF \
-DLLVM_TARGETS_TO_BUILD="X86" \
-DCMAKE_BUILD_TYPE=Release \
-DLLVM_ENABLE_ASSERTIONS=ON \
-DLLVM_ENABLE_ZLIB=OFF \
-DLLVM_ENABLE_RTTI=ON \
-DCMAKE_INSTALL_PREFIX=$PWD/../install/llvmorg-9e42

ninja install -j8

Build CINN in the created container.

```bash
# create a build directory
$ mkdir /CINN/build && cd /CINN/build
# use cmake to generate Makefile and download dependencies, use flags to toggle on/off CUDA and CUDNN support
# e.g. 1) build with CUDA & CUDNN support
# cmake .. -DWITH_CUDA=ON -DWITH_CUDNN=On
# e.g. 2) build without CUDA & CUDNN support(CPU only, default)
# cmake .. -DWITH_CUDA=OFF -DWITH_CUDNN=OFF
$ cmake .. -DWITH_CUDA=ON -DWITH_CUDNN=ON
# build CINN
$ make
```

*add binary executables to environment variables*
`build/dist/cinn-xxxxx.whl` is the generated python wheel package; the actual file name will vary depending on the build options, python version, build environment, and git tag.

```sh
export PATH="$PWD/../install/llvmorg-9e42/bin:$PATH"
```bash
$ pip install build/dist/cinn.*.whl
```

*add llvm project directory to environment variables.*
```
export LLVM11_DIR="/path/to/llvm_directory/"
A demo using CINN's computation API.
```python
import numpy as np
from cinn.frontend import *
from cinn import Target
from cinn.framework import *
from cinn import runtime
from cinn import ir
from cinn import lang
from cinn.common import *

target = DefaultHostTarget()
#target = DefaultNVGPUTarget()

builder = CinnBuilder("test_basic")
a = builder.create_input(Float(32), (1, 24, 56, 56), "A")
b = builder.create_input(Float(32), (1, 24, 56, 56), "B")
c = builder.add(a, b)
d = builder.create_input(Float(32), (144, 24, 1, 1), "D")
e = builder.conv(c, d)

computation = Computation.build_and_compile(target, builder)

A_data = np.random.random([1, 24, 56, 56]).astype("float32")
B_data = np.random.random([1, 24, 56, 56]).astype("float32")
D_data = np.random.random([144, 24, 1, 1]).astype("float32")

computation.get_tensor("A").from_numpy(A_data, target)
computation.get_tensor("B").from_numpy(B_data, target)
computation.get_tensor("D").from_numpy(D_data, target)

computation.execute()

e_tensor = computation.get_tensor(str(e))
edata_cinn = e_tensor.numpy(target)
print(edata_cinn)
```

*check the llvm version*

```sh
llvm-config --version

# should get 12.0.0git
run the demo.
```
$ python demo.py
```
`target = DefaultHostTarget()` instructs CINN to use the CPU for computing, while `target = DefaultNVGPUTarget()` uses the GPU.
17 changes: 10 additions & 7 deletions python/setup.py.in
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,11 @@ import errno
from contextlib import contextmanager
from setuptools import setup

def set_rpath(lib, rpath):
    """Rewrite the rpath of an ELF shared library using the `patchelf` tool.

    Args:
        lib: Path to the shared library file to patch.
        rpath: The new rpath value (e.g. '$ORIGIN/'); passed literally so the
            dynamic linker, not the shell, expands $ORIGIN at load time.

    Raises:
        Exception: if the patchelf command cannot be run or exits non-zero.
    """
    import shlex
    # Quote both arguments so paths containing spaces or shell
    # metacharacters are handled safely, and '$ORIGIN' stays literal.
    command = "patchelf --set-rpath {} {}".format(shlex.quote(rpath), shlex.quote(lib))
    if os.system(command) != 0:
        raise Exception("patch {} failed, command: {}".format(lib, command))

def git_commit():
try:
cmd = ['git', 'rev-parse', 'HEAD']
Expand Down Expand Up @@ -132,17 +137,15 @@ if '${WITH_MKL_CBLAS}' == 'ON':

if '${WITH_CUDA}' == 'ON':
cinnlibs.append('${CMAKE_BINARY_DIR}/dist/cinn/include/cinn/runtime/cuda/cinn_cuda_runtime_source.cuh')
cinnlibs.append('${CUDA_NVRTC_LIB}')
cinnlibs.append('${CUDA_CUDA_LIBRARY}')
cinnlibs.append('${CUBLAS}')

if '${WITH_CUDNN}' == 'ON':
cinnlibs.append('${CUDNN}')

for lib in cinnlibs:
shutil.copy(lib, libs_path)
package_data['cinn.libs'].append(os.path.basename(lib))
libname = os.path.basename(lib)
if lib.endswith('so'):
set_rpath(os.path.join(libs_path, libname) , '$ORIGIN/')
package_data['cinn.libs'].append(libname)

set_rpath('${CMAKE_BINARY_DIR}/python/cinn/core_api.so', '$ORIGIN/libs/')

def git_commit():
try:
Expand Down

0 comments on commit fcde5e8

Please sign in to comment.