diff --git a/pixi.lock b/pixi.lock
index 3a9173a0..197df8c3 100644
--- a/pixi.lock
+++ b/pixi.lock
@@ -1156,7 +1156,7 @@ packages:
   name: b3d-prototype
   version: 0.1.0
   path: .
-  sha256: f9e12726749af0bf616bb7cf8a7d297dfd3fbb90aa5b5f77c54ecc28688cf3c9
+  sha256: bdd715a11b829d0cd12a913edbd8290fda18c4eea572252a04c421d77fd19409
   requires_dist:
   - genjax==0.5.0.post13.dev0+973fb60d
   - opencv-python>=4.10.0.84,<4.10.1
diff --git a/pyproject.toml b/pyproject.toml
index 65d6fbb9..2be9c549 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -17,6 +17,9 @@ platforms = ["linux-64"]
 [tool.pixi.system-requirements]
 cuda = "12"
 
+[tool.pixi.activation]
+scripts = ["scripts/env-activation.sh"]
+
 [tool.pixi.pypi-options]
 index-url = "https://pypi.org/simple"
 extra-index-urls = ["https://oauth2accesstoken@us-west1-python.pkg.dev/probcomp-caliban/probcomp/simple"]
@@ -60,10 +63,10 @@ ninja = "*"
 pytest = "*"
 
 [tool.pixi.tasks]
-egl = "cd .pixi/envs/gpu/x86_64-conda-linux-gnu/sysroot/usr/lib64/ && ln -s libEGL_mesa.so.0.0.0 libEGL.so"
+foo = "export FOO=8"
 rerun = "rerun --port 8812"
 b3d-pull = {cmd = "python b3d_pull.py -ow", cwd = "b3d/bucket_utils" }
-test = { cmd = "pytest tests/dense_model_unit_tests/triangle_depth_posterior/test_triangle_depth_posterior.py", env = { XLA_PYTHON_CLIENT_PREALLOCATE = "false", XLA_PYTHON_CLIENT_ALLOCATOR = "platform", TORCH_CUDA_ARCH_LIST = "8.5", CPLUS_INCLUDE_PATH = "$CONDA_PREFIX/targets/x86_64-linux/include"} }
+test = { cmd = "pytest tests/dense_model_unit_tests/triangle_depth_posterior/test_triangle_depth_posterior.py", env = { XLA_PYTHON_CLIENT_PREALLOCATE = "false", XLA_PYTHON_CLIENT_ALLOCATOR = "platform", CPLUS_INCLUDE_PATH = "$CONDA_PREFIX/targets/x86_64-linux/include"} }
 
 [tool.pytest.ini_options]
 pythonpath = ["src"]
\ No newline at end of file
diff --git a/scripts/env-activation.sh b/scripts/env-activation.sh
new file mode 100755
index 00000000..408dc599
--- /dev/null
+++ b/scripts/env-activation.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+
+set -euo pipefail
+
+capability=$(nvidia-smi --query-gpu=compute_cap --format=csv,noheader)
+
+export TORCH_CUDA_ARCH_LIST="$capability"
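
Note (not part of the patch): the new scripts/env-activation.sh exports whatever nvidia-smi prints, so on a host with several GPUs TORCH_CUDA_ARCH_LIST would pick up one compute capability per line, embedded newlines included. A minimal sketch of a more defensive variant, assuming TORCH_CUDA_ARCH_LIST accepts a semicolon-separated list such as "8.0;8.6":

#!/usr/bin/env bash
# Sketch only: collect the distinct compute capabilities across all visible GPUs.
set -euo pipefail

mapfile -t caps < <(nvidia-smi --query-gpu=compute_cap --format=csv,noheader | sort -u)

# Join the capabilities with semicolons, e.g. "8.0;8.6".
export TORCH_CUDA_ARCH_LIST="$(IFS=';'; echo "${caps[*]}")"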