diff --git a/Cargo.lock b/Cargo.lock index 5df4b781..91fce5f0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -195,9 +195,22 @@ name = "boulder" version = "0.1.0" dependencies = [ "clap", + "config", "container", + "dirs", + "futures", + "hex", + "itertools 0.11.0", "moss", + "nix", + "serde", + "sha2", + "stone_recipe", + "strum", + "thiserror", "tokio", + "tui", + "url", ] [[package]] @@ -327,6 +340,19 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" +[[package]] +name = "config" +version = "0.1.0" +dependencies = [ + "dirs", + "futures", + "serde", + "serde_yaml", + "thiserror", + "tokio", + "tokio-stream", +] + [[package]] name = "console" version = "0.15.7" @@ -351,6 +377,7 @@ name = "container" version = "0.1.0" dependencies = [ "nix", + "thiserror", ] [[package]] @@ -537,6 +564,27 @@ dependencies = [ "subtle", ] +[[package]] +name = "dirs" +version = "5.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" +dependencies = [ + "dirs-sys", +] + +[[package]] +name = "dirs-sys" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" +dependencies = [ + "libc", + "option-ext", + "redox_users", + "windows-sys 0.48.0", +] + [[package]] name = "dotenvy" version = "0.15.7" @@ -1094,6 +1142,17 @@ version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" +[[package]] +name = "libredox" +version = "0.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" +dependencies = [ + "bitflags 2.4.1", + "libc", + "redox_syscall", +] + [[package]] name = "libsqlite3-sys" 
version = "0.27.0" @@ -1193,6 +1252,7 @@ dependencies = [ "bytes", "chrono", "clap", + "config", "dag", "futures", "hex", @@ -1214,6 +1274,7 @@ dependencies = [ "tui", "url", "vfs", + "xxhash-rust", ] [[package]] @@ -1322,6 +1383,12 @@ version = "11.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" +[[package]] +name = "option-ext" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + [[package]] name = "parking_lot" version = "0.12.1" @@ -1532,6 +1599,17 @@ dependencies = [ "bitflags 1.3.2", ] +[[package]] +name = "redox_users" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4" +dependencies = [ + "getrandom", + "libredox", + "thiserror", +] + [[package]] name = "regex" version = "1.10.2" @@ -1687,6 +1765,12 @@ dependencies = [ "untrusted", ] +[[package]] +name = "rustversion" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" + [[package]] name = "ryu" version = "1.0.15" @@ -2122,8 +2206,10 @@ dependencies = [ name = "stone_recipe" version = "0.1.0" dependencies = [ + "nom", "serde", "serde_yaml", + "strum", "thiserror", "url", ] @@ -2145,6 +2231,28 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +[[package]] +name = "strum" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" +dependencies = [ + "strum_macros", +] + +[[package]] +name = "strum_macros" +version = "0.25.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "rustversion", + "syn 2.0.39", +] + [[package]] name = "subtle" version = "2.5.0" diff --git a/Cargo.toml b/Cargo.toml index d642d3e4..ed433fd9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,12 +16,14 @@ bytes = "1.5.0" chrono = "0.4.30" clap = { version = "4.4.10", features = ["derive"] } crossterm = "0.27.0" +dirs = "5.0" indicatif = "0.17.7" itertools = "0.11.0" futures = "0.3.28" hex = "0.4.3" log = "0.4" -nix = { version = "0.27.1", features = ["user", "fs", "sched", "process", "mount", "hostname"] } +nom = "7.1.3" +nix = { version = "0.27.1", features = ["user", "fs", "sched", "process", "mount", "hostname", "signal"] } once_cell = "1.18.0" petgraph = "0.6.4" rayon = "1.8" @@ -30,6 +32,7 @@ serde = { version = "1", features = ["derive"] } serde_yaml = "0.9" sha2 = "0.10.8" sqlx = { version = "0.7.3", features = ["sqlite", "chrono", "runtime-tokio"] } +strum = { version = "0.25", features = ["derive"] } thiserror = "1" tokio = { version = "1.34", features = ["full"] } tokio-stream = { version = "0.1.14", features = ["time"] } diff --git a/crates/boulder/Cargo.toml b/crates/boulder/Cargo.toml index b628b471..2158a0c0 100644 --- a/crates/boulder/Cargo.toml +++ b/crates/boulder/Cargo.toml @@ -6,8 +6,21 @@ edition.workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +config = { path = "../config" } container = { path = "../container" } moss = { path = "../moss" } +stone_recipe = { path = "../stone_recipe" } +tui = { path = "../tui" } clap.workspace = true +dirs.workspace = true +futures.workspace = true +hex.workspace = true +itertools.workspace = true +nix.workspace = true +serde.workspace = true +sha2.workspace = true +strum.workspace = true +thiserror.workspace = true tokio.workspace = true 
+url.workspace = true diff --git a/crates/boulder/data/macros/actions/autotools.yml b/crates/boulder/data/macros/actions/autotools.yml new file mode 100644 index 00000000..52356beb --- /dev/null +++ b/crates/boulder/data/macros/actions/autotools.yml @@ -0,0 +1,62 @@ +actions: + + # Perform ./configure with the default options + - configure: + command: | + test -x ./configure || ( echo "%%configure: The ./configure script could not be found" ; exit 1 ) + ./configure %(options_configure) + dependencies: + - autoconf + - automake + + # Perform a make + # TIP: Add V=1 VERBOSE=1 after '%make' in the recipe if you need a more verbose build + - make: + command: | + make -j "%(jobs)" + dependencies: + - make + + # Install results of build to the destination directory + - make_install: + command: | + %make install DESTDIR="%(installroot)" + dependencies: + - make + + # Re autotools-configure a project without an autogen.sh script + - reconfigure: + command: | + autoreconf -vfi || ( echo "%%reconfigure: Failed to run autoreconf"; exit 1 ) + %configure + dependencies: + - autoconf + - automake + + # Run autogen.sh script, attempting to only configure once + - autogen: + command: | + NOCONFIGURE="noconfigure"; export NOCONFIGURE + sh ./autogen.sh %(options_configure) + ./configure %(options_configure) + dependencies: + - autoconf + - automake + +definitions: + + # Default configuration options as passed to configure + - options_configure: | + --prefix="%(prefix)" \ + --bindir="%(bindir)" \ + --sbindir="%(sbindir)" \ + --build="%(build_platform)" \ + --host="%(host_platform)" \ + --libdir="%(libdir)" \ + --mandir="%(mandir)" \ + --infodir="%(infodir)" \ + --datadir="%(datadir)" \ + --sysconfdir="%(sysconfdir)" \ + --localstatedir="%(localstatedir)" \ + --sharedstatedir="%(sharedstatedir)" \ + --libexecdir="%(libexecdir)" diff --git a/crates/boulder/data/macros/actions/cmake.yml b/crates/boulder/data/macros/actions/cmake.yml new file mode 100644 index 00000000..ca58ca5f --- 
/dev/null +++ b/crates/boulder/data/macros/actions/cmake.yml @@ -0,0 +1,44 @@ +actions: + + # Perform cmake with the default options in a subdirectory + - cmake: + command: | + cmake %(options_cmake) + dependencies: + - cmake + + # Perform cmake with unity build enabled + - cmake_unity: + command: | + cmake -DCMAKE_UNITY_BUILD=ON %(options_cmake) + dependencies: + - cmake + + # Build the cmake project + - cmake_build: + command: | + ninja -v -j "%(jobs)" -C "%(builddir)" + dependencies: + - ninja + + # Install results of the build to the destination directory + - cmake_install: + command: | + DESTDIR="%(installroot)" ninja install -v -j "%(jobs)" -C "%(builddir)" + dependencies: + - ninja + +definitions: + + # Default cmake options as passed to cmake + - options_cmake: | + -G Ninja -S . -B "%(builddir)" \ + -DCMAKE_C_FLAGS="${CFLAGS}" \ + -DCMAKE_CXX_FLAGS="${CXXFLAGS}" \ + -DCMAKE_C_FLAGS_RELEASE="" \ + -DCMAKE_CXX_FLAGS_RELEASE="" \ + -DCMAKE_LD_FLAGS="${LDFLAGS}" \ + -DCMAKE_BUILD_TYPE="Release" \ + -DCMAKE_INSTALL_LIBDIR="lib" \ + -DCMAKE_INSTALL_PREFIX="%(prefix)" \ + -DCMAKE_LIB_SUFFIX="%(libsuffix)" diff --git a/crates/boulder/data/macros/actions/meson.yml b/crates/boulder/data/macros/actions/meson.yml new file mode 100644 index 00000000..b7465ed7 --- /dev/null +++ b/crates/boulder/data/macros/actions/meson.yml @@ -0,0 +1,43 @@ +actions: + + # Run meson with the default options in a subdirectory + - meson: + command: | + test -e ./meson.build || ( echo "%%meson: The ./meson.build script could not be found" ; exit 1 ) + CFLAGS="${CFLAGS}" CXXFLAGS="${CXXFLAGS}" LDFLAGS="${LDFLAGS}" meson setup %(options_meson) + dependencies: + - meson + + # Run meson with unity build enabled + - meson_unity: + command: | + test -e ./meson.build || ( echo "%%meson: The ./meson.build script could not be found" ; exit 1 ) + CFLAGS="${CFLAGS}" CXXFLAGS="${CXXFLAGS}" LDFLAGS="${LDFLAGS}" meson setup --unity on %(options_meson) + dependencies: + - meson + + # Build the meson 
project + - meson_build: + command: | + meson compile -v -j "%(jobs)" -C "%(builddir)" + dependencies: + - meson + + # Install results of the build to the destination directory + - meson_install: + command: | + DESTDIR="%(installroot)" meson install --no-rebuild -C "%(builddir)" + dependencies: + - meson + +definitions: + + # Default meson options as passed to meson + - options_meson: | + --prefix="%(prefix)" \ + --buildtype="plain" \ + --libdir="lib%(libsuffix)" \ + --libexecdir="lib%(libsuffix)/%(name)" \ + --sysconfdir="%(sysconfdir)" \ + --localstatedir="%(localstatedir)" \ + "%(builddir)" diff --git a/crates/boulder/data/macros/actions/misc.yml b/crates/boulder/data/macros/actions/misc.yml new file mode 100644 index 00000000..c687045c --- /dev/null +++ b/crates/boulder/data/macros/actions/misc.yml @@ -0,0 +1,53 @@ +actions: + + # Install files to %(bindir) + - install_bin: + command: | + install -Dm00755 -t %(installroot)/%(bindir) + + # Macro to create an empty directory + - install_dir: + command: | + install -dm00755 + + # Macro to install a file with default executable permissions + - install_exe: + command: | + install -Dm00755 + + # Macro to install a file without executable permissions + - install_file: + command: | + install -Dm00644 + + # Patch sources from file + # Usage: %patch %(pkgdir)/${file} + # If you need to override -p#, add it after ${file} + # Example: %patch %(pkgdir)/some.patch -p3 + - patch: + command: | + patch -f -p1 -i + dependencies: + - patch + + # Create a tmpfiles.d file for the package with given content + - tmpfiles: + command: | + create_tmpfiles(){ + if [ -z "%(libsuffix)" ]; then + mkdir -p %(installroot)/%(tmpfilesdir) + echo "$@" >> %(installroot)/%(tmpfilesdir)/%(name).conf + fi + } + create_tmpfiles + + # Create a sysusers.d file for the package with given content + - sysusers: + command: | + create_sysusers(){ + if [ -z "%(libsuffix)" ]; then + mkdir -p %(installroot)/%(sysusersdir) + echo "$@" >> 
%(installroot)/%(sysusersdir)/%(name).conf + fi + } + create_sysusers diff --git a/crates/boulder/data/macros/actions/pgo.yml b/crates/boulder/data/macros/actions/pgo.yml new file mode 100644 index 00000000..05541398 --- /dev/null +++ b/crates/boulder/data/macros/actions/pgo.yml @@ -0,0 +1,124 @@ +actions: + # Actions required for PGO builds + + # Merge LLVM profile data, copies to combined.profdata in case there's no stage2 + - llvm_merge_s1: + command: | + llvm-profdata merge --failure-mode=all -output=%(pgo_dir)/ir.profdata %(pgo_dir)/IR/default*.profraw + cp %(pgo_dir)/ir.profdata %(pgo_dir)/combined.profdata + # Merge LLVM profile data after stage2 PGO workload + - llvm_merge_s2: + command: | + rm %(pgo_dir)/combined.profdata + llvm-profdata merge --failure-mode=all -output=%(pgo_dir)/combined.profdata %(pgo_dir)/ir.profdata %(pgo_dir)/CS/default*.profraw + + # Instrument file with llvm-bolt + - bolt_instr: + command: | + binstr(){ + mv ${1} ${1}.orig + mkdir -p %(pgo_dir)/BOLT + llvm-bolt ${1}.orig -instrument --instrumentation-file=%(pgo_dir)/BOLT/$(basename ${1}) --instrumentation-file-append-pid ${2} ${3} ${4} -o ${1} + } + binstr + + # Instrument file with llvm-bolt + - bolt_merge: + command: | + bmerge(){ + merge-fdata %(pgo_dir)/BOLT/$(basename ${1}).*.fdata > %(pgo_dir)/$(basename ${1}).fdata + } + bmerge + + # Apply bolt profile + - bolt_opt: + command: | + boptim(){ + llvm-bolt ${1}.orig -o ${1}.bolt -data=%(pgo_dir)/$(basename ${1}).fdata -reorder-blocks=cache+ -reorder-functions=hfsort+ -split-functions=3 -split-all-cold -split-eh -dyno-stats -icf=1 -use-gnu-stack ${2} ${3} ${4} + cp ${1}.bolt ${1} + } + boptim + dependencies: + - llvm-bolt + + # Collect perf data suitable for the bolt macros + - bolt_perf: + command: | + bperf(){ + mkdir -p %(pgo_dir)/BOLT + perf record -F 6000 -e cycles:u -j any,u -o %(pgo_dir)/BOLT/perf.data -- ${@} + } + bperf + dependencies: + - perf + + # Convert perf data into bolt equivalent + - bolt_perf2bolt: + command: | 
+ bperf2bolt(){ + cp ${1} ${1}.orig + perf2bolt -ignore-build-id -p %(pgo_dir)/BOLT/perf.data -o %(pgo_dir)/$(basename ${1}).fdata ${1} + } + bperf2bolt + +tuning : + # A set of groups we can toggle from the "tune" key + + # Enable pgosample + - pgosample: + enabled: pgosample + + # Enable pgostage1 + - pgostage1: + enabled: pgostage1 + + # Enable pgostage2 + - pgostage2: + enabled: pgostage2 + + # Enable pgouse + - pgouse: + enabled: pgouse + +flags : + # PGO flag options + + # PGO sample flags for when workload is not comprehensive (workload builds only) + - pgosample: + llvm: + c : "-fno-profile-sample-accurate" + cxx : "-fno-profile-sample-accurate" + ld : "-fno-profile-sample-accurate" + gnu: + c : "-fprofile-partial-training" + cxx : "-fprofile-partial-training" + ld : "-fprofile-partial-training" + + # PGO stage1 flags for ProfileStage1 (workload builds only) + - pgostage1: + llvm: + c : "-fprofile-generate=%(pgo_dir)/IR" + cxx : "-fprofile-generate=%(pgo_dir)/IR" + ld : "-fprofile-generate=%(pgo_dir)/IR" + gnu: + c : "-fprofile-generate -fprofile-dir=%(pgo_dir)" + cxx : "-fprofile-generate -fprofile-dir=%(pgo_dir)" + ld : "-fprofile-generate -fprofile-dir=%(pgo_dir)" + + # PGO stage2 flags for ProfileStage2 (workload builds only) + - pgostage2: + llvm: + c : "-fprofile-use=%(pgo_dir)/ir.profdata -fcs-profile-generate=%(pgo_dir)/CS" + cxx : "-fprofile-use=%(pgo_dir)/ir.profdata -fcs-profile-generate=%(pgo_dir)/CS" + ld : "-fprofile-use=%(pgo_dir)/ir.profdata -fcs-profile-generate=%(pgo_dir)/CS" + + # PGO final flags for ProfileUse (workload builds only) + - pgouse: + llvm: + c : "-fprofile-use=%(pgo_dir)/combined.profdata" + cxx : "-fprofile-use=%(pgo_dir)/combined.profdata" + ld : "-fprofile-use=%(pgo_dir)/combined.profdata" + gnu: + c : "-fprofile-use -fprofile-dir=%(pgo_dir) -fprofile-correction" + cxx : "-fprofile-use -fprofile-dir=%(pgo_dir) -fprofile-correction" + ld : "-fprofile-use -fprofile-dir=%(pgo_dir) -fprofile-correction" diff --git 
a/crates/boulder/data/macros/actions/python.yml b/crates/boulder/data/macros/actions/python.yml new file mode 100644 index 00000000..eba9ab2a --- /dev/null +++ b/crates/boulder/data/macros/actions/python.yml @@ -0,0 +1,31 @@ +actions: + + # Perform python setup and build with the default options + - python: + command: | + test -e ./setup.py || ( echo "%%python: The ./setup.py script could not be found" ; exit 1 ) + python3 setup.py build + dependencies: + - python + + # Install python package to the destination directory + - python_install: + command: | + python3 setup.py install --root="%(installroot)" + dependencies: + - python + + # Build a wheel for python PEP517 projects + - pyproject_build: + command: | + python3 -m build --wheel --no-isolation + dependencies: + - python-build + - python-wheel + + # Install wheel to destination directory + - pyproject_install: + command: | + python3 -m installer --destdir="%(installroot)" dist/*.whl + dependencies: + - python-installer diff --git a/crates/boulder/data/macros/arch/aarch64.yml b/crates/boulder/data/macros/arch/aarch64.yml new file mode 100644 index 00000000..2aaa5e71 --- /dev/null +++ b/crates/boulder/data/macros/arch/aarch64.yml @@ -0,0 +1,23 @@ +# Provides -m64 builds for aarch64 build-hosts + +definitions: + + - libsuffix : "" + - build_platform : aarch64-%(vendorID) + - host_platform : aarch64-%(vendorID) + - cc : "%(compiler_c) -m64" + - cxx : "%(compiler_cxx) -m64" + - cpp : "%(compiler_cpp) -m64" + - march : armv8-a+simd+fp+crypto + - mtune : cortex-a72.cortex-a53 + +flags: + + # Set architecture flags + - architecture: + llvm: + c : "-march=armv8-a+simd+fp+crypto -mtune=cortex-a72" + cxx : "-march=armv8-a+simd+fp+crypto -mtune=cortex-a72" + gcc: + c : "-march=armv8-a+simd+fp+crypto -mtune=cortex-a72.cortex-a53" + cxx : "-march=armv8-a+simd+fp+crypto -mtune=cortex-a72.cortex-a53" diff --git a/crates/boulder/data/macros/arch/base.yml b/crates/boulder/data/macros/arch/base.yml new file mode 100644 index 
00000000..9970399b --- /dev/null +++ b/crates/boulder/data/macros/arch/base.yml @@ -0,0 +1,588 @@ +# Provides core definitions which each profile may override + +definitions: + + # Basic variables required for packages to build correctly + - libsuffix : "" + - prefix : "/usr" + - bindir : "%(prefix)/bin" + - sbindir : "%(prefix)/sbin" + - includedir : "%(prefix)/include" + - datadir : "%(prefix)/share" + - localedir : "%(datadir)/locale" + - infodir : "%(datadir)/info" + - mandir : "%(datadir)/man" + - docdir : "%(datadir)/doc" + - vendordir : "%(datadir)/defaults" + - completionsdir : "%(datadir)/bash-completion/completions" + - tmpfilesdir : "%(prefix)/lib/tmpfiles.d" + - sysusersdir : "%(prefix)/lib/sysusers.d" + - udevrulesdir : "%(prefix)/lib/udev/rules.d" + - localstatedir : "/var" + - sharedstatedir : "%(localstatedir)/lib" + - runstatedir : "/run" + - sysconfdir : "/etc" + - libdir : "%(prefix)/lib%(libsuffix)" + - libexecdir : "%(libdir)/%(name)" + - builddir : "serpent_builddir" + + # The vendorID is encoded into the triplet, toolchain, builds, etc. + # It must match the triplet from bootstrap-scripts. + - vendorID : "serpent-linux" + + # Must be set for CC/CXX/CPP to work + - cc : "%(compiler_c)" + - cxx : "%(compiler_cxx)" + - objc : "%(compiler_objc)" + - objcxx : "%(compiler_objcxx)" + - cpp : "%(compiler_cpp)" + - objcpp : "%(compiler_objcpp)" + - objcxxcpp : "%(compiler_objcxxcpp)" + - ar : "%(compiler_ar)" + - ld : "%(compiler_ld)" + - objcopy : "%(compiler_objcopy)" + - nm : "%(compiler_nm)" + - ranlib : "%(compiler_ranlib)" + - strip : "%(compiler_strip)" + - path : "%(compiler_path)" + - ccachedir : "%(compiler_cache)" + - pkgconfigpath : "%(libdir)/pkgconfig:/usr/share/pkgconfig" + +actions : + + # scriptBase is merged to the top of all newly generated build scripts. 
+ - scriptBase : + command: | + #!/bin/sh + set -e + set -x + TERM="dumb"; export TERM + PKG_CONFIG_PATH="%(pkgconfigpath)"; export PKG_CONFIG_PATH + CFLAGS="%(cflags)"; export CFLAGS + CGO_CFLAGS="%(cflags)"; export CGO_CFLAGS + CXXFLAGS="%(cxxflags)"; export CXXFLAGS + CGO_CXXFLAGS="%(cxxflags)"; export CGO_CXXFLAGS + LDFLAGS="%(ldflags)"; export LDFLAGS + CGO_LDFLAGS="%(ldflags) -Wl,--no-gc-sections"; export CGO_LDFLAGS + CC="%(cc)"; export CC + CXX="%(cxx)"; export CXX + OBJC="%(objc)"; export OBJC + OBJCXX="%(objcxx)"; export OBJCXX + CPP="%(cpp)"; export CPP + OBJCPP="%(objcpp)"; export OBJCPP + OBJCXXCPP="%(objcxxcpp)"; export OBJCXXCPP + AR="%(ar)"; export AR + LD="%(ld)"; export LD + OBJCOPY="%(objcopy)"; export OBJCOPY + NM="%(nm)"; export NM + RANLIB="%(ranlib)"; export RANLIB + STRIP="%(strip)"; export STRIP + PATH="%(path)"; export PATH + CCACHE_DIR="%(ccachedir)"; export CCACHE_DIR; + test -z "$CCACHE_DIR" && unset CCACHE_DIR; + LANG="en_US.UTF-8"; export LANG + LC_ALL="en_US.UTF-8"; export LC_ALL + test -d "%(workdir)" || (echo "The work directory %(workdir) does not exist"; exit 1) + cd "%(workdir)" && echo "The work directory %%(workdir) is ${PWD}" + +defaultTuningGroups : + - asneeded + - avxwidth + - base + - bindnow + - debug + - fortify + - frame-pointer + - harden + - icf + - optimize + - relr + - symbolic + +tuning : + # A set of groups we can toggle from the "tune" key + + # Architecture flags should always be enabled + - architecture: + enabled: + - architecture + + # Base flags should almost always be enabled, but want to be able to disable + - base: + enabled: + - base + + - debug: + options: + - lines: + enabled: debug-lines + - std: + enabled: debug-std + default: std + + # Toggle frame-pointer + - frame-pointer: + enabled: no-omit-frame-pointer + disabled: omit-frame-pointer + + # Enable bindnow functionality + - bindnow: + enabled: bindnow + + # Enable symbolic + - symbolic: + options: + - all: + enabled: symbolic-all + - functions: + 
enabled: symbolic-functions + - nonweak: + enabled: symbolic-nonweak + default: functions + + # Enable fortify + - fortify: + enabled: fortify + + # Enable hardening + - harden: + options: + - none: + enabled: harden-none + - lvl1: + enabled: harden-lvl1 + - lvl2: + enabled: harden-lvl2 + disabled: harden-none + default: lvl1 + + # Enable optimisation per given levels + - optimize: + options: + - fast: + enabled: optimize-fast + - generic: + enabled: optimize-generic + - size: + enabled: + - optimize-size + - sections + - speed: + enabled: optimize-speed + default: generic + + # Enable LTO + - lto: + options: + - full: + enabled: lto-full + - thin: + enabled: lto-thin + default: full + + # Enable LTOextra. Requires the equivalent lto option + - ltoextra: + options: + - full: + enabled: ltoextra-full + - thin: + enabled: ltoextra-thin + default: full + + # Enable ICF + - icf: + options: + - safe: + enabled: icf-safe + - all: + enabled: icf-all + default: safe + + # Enable Ignore data address equality + - idae: + enabled: idae + + # Enable Polly + - polly: + enabled: polly + + # Enable section splitting + - sections: + enabled: sections + + # Toggle common + - common: + enabled: common + + # Enable math + - math: + enabled: math + + # Enable noplt + - noplt: + enabled: + - noplt + - bindnow + + # Enable nosemantic + - nosemantic: + enabled: nosemantic + + # Enable nodaed + - nodaed: + enabled: nodaed + + # Enable asneeded + - asneeded: + enabled: asneeded + + # Enable avxwidth + - avxwidth: + enabled: avxwidth-128 + + # Enable bolt + - bolt: + enabled: bolt + + # Enable runpath + - runpath: + enabled: runpath + + # Enable sse2avx + - sse2avx: + enabled: sse2avx + + # Enable pch-instantiate + - pch-instantiate: + enabled: pch-instantiate + + # Enable visibility + - visibility: + options: + - inline: + enabled: visibility-inline + - hidden: + enabled: visibility-hidden + default: inline + + # Enable relative-vtables + - relative-vtables: + enabled: relative-vtables + + 
# Enable relr + - relr: + enabled: relr + +flags : + + # Needs overriding with -march/mtune values. + - architecture: + c : "" + cxx : "" + ld : "" + + # Base flags, enabled by default + - base: + c : "-pipe -Wformat -Wformat-security -Wno-error -fPIC" + cxx : "-pipe -Wformat -Wformat-security -Wno-error -fPIC" + ld : "-Wl,-O2,--gc-sections" + + - omit-frame-pointer: + c : "-fomit-frame-pointer -momit-leaf-frame-pointer" + cxx : "-fomit-frame-pointer -momit-leaf-frame-pointer" + + - no-omit-frame-pointer: + c : "-fno-omit-frame-pointer -mno-omit-leaf-frame-pointer" + cxx : "-fno-omit-frame-pointer -mno-omit-leaf-frame-pointer" + + # Toggle bindnow (ON) + - bindnow: + ld : "-Wl,-z,relro,-z,now" + + # Toggle symbolic (ON symbolic-functions) + - symbolic-all: + ld : "-Wl,-Bsymbolic" + + - symbolic-functions: + ld : "-Wl,-Bsymbolic-functions" + + - symbolic-nonweak: + llvm: + ld : "-Wl,-Bsymbolic-non-weak-functions" + + # Toggle fortify (ON) + - fortify: + c : "-D_FORTIFY_SOURCE=2" + cxx : "-D_FORTIFY_SOURCE=2" + + # No hardening! 
+ - harden-none: + c : "-fno-stack-protector" + cxx : "-fno-stack-protector" + + # Hardening (ON harden-lvl1) + - harden-lvl1: + c : "-fstack-protector --param ssp-buffer-size=32" + cxx : "-fstack-protector --param ssp-buffer-size=32" + + - harden-lvl2: + llvm: + c : "-fstack-protector-strong -fstack-clash-protection -fPIE --param ssp-buffer-size=4" + cxx : "-fstack-protector-strong -fstack-clash-protection -fPIE --param ssp-buffer-size=4" + gnu: + c : "-fstack-protector-strong -fstack-clash-protection -fPIE --param ssp-buffer-size=4" + cxx : "-fstack-protector-strong -fstack-clash-protection -fPIE --param ssp-buffer-size=4" + + # Use section splitting, improves GC without lto only (OFF) + - sections: + c : "-ffunction-sections -fdata-sections" + cxx : "-ffunction-sections -fdata-sections" + + # Optimize without care for math issues + - optimize-fast: + c : "-Ofast" + cxx : "-Ofast" + + # Generic optimisation case (ON) + - optimize-generic: + c : "-O2" + cxx : "-O2" + + # Optimize for size (OFF) + - optimize-size: + c : "-Os" + cxx : "-Os" + + # Optimize for speed (OFF) + - optimize-speed: + c : "-O3" + cxx : "-O3" + + # Enable LTO optimisations (OFF) + - lto-full: + c : "-flto" + cxx : "-flto" + ld : "-flto" + + # Enable Thin-LTO optimisations (OFF) + - lto-thin: + llvm: + c : "-flto=thin" + cxx : "-flto=thin" + ld : "-flto=thin" + + # Enable LTOextra optimisations (OFF) + - ltoextra-full: + gnu: + c : "-fdevirtualize-at-ltrans" + cxx : "-fdevirtualize-at-ltrans" + llvm: + c : "-fwhole-program-vtables -fvirtual-function-elimination" + cxx : "-fwhole-program-vtables -fvirtual-function-elimination" + + # Enable Thin-LTOextra optimisations (OFF) + - ltoextra-thin: + llvm: + c : "-fwhole-program-vtables" + cxx : "-fwhole-program-vtables" + + # Enable ALL LLVM ICF optimisations (OFF) + - icf-all: + llvm: + ld : "-Wl,--icf=all" + + # Enable LLVM ICF optimisations (ON) + - icf-safe: + llvm: + ld : "-Wl,--icf=safe" + + # Ignore data address equality (OFF) + - idae: + 
llvm: + ld : "-Wl,--ignore-data-address-equality" + + # Enable LLVM polly optimisations (OFF) + - polly: + llvm: + c : "-Xclang -mllvm -Xclang -polly -Xclang -mllvm -Xclang -polly-vectorizer=stripmine" + cxx : "-Xclang -mllvm -Xclang -polly -Xclang -mllvm -Xclang -polly-vectorizer=stripmine" + + # Toggle options you want to use with llvm-bolt (OFF) + - bolt: + gnu: + c : "-fno-reorder-blocks-and-partition​" + cxx : "-fno-reorder-blocks-and-partition​" + ld : "-Wl,-q" + llvm: + ld : "-Wl,-q" + + # Toggle -fcommon (OFF) + - common: + c : "-fcommon" + cxx : "-fcommon" + + # Toggle debug-lines optimisations + - debug-lines: + llvm: + c : "-gline-tables-only -fasynchronous-unwind-tables" + cxx : "-gline-tables-only -fasynchronous-unwind-tables" + + # Toggle debug-std optimisations (ON) + - debug-std: + c : "-g -feliminate-unused-debug-types -fasynchronous-unwind-tables" + cxx : "-g -feliminate-unused-debug-types -fasynchronous-unwind-tables" + + # Toggle fast math (OFF) + - math: + gnu: + c : "-fno-math-errno -fno-trapping-math" + cxx : "-fno-math-errno -fno-trapping-math" + llvm: + c : "-fno-math-errno -fno-trapping-math -ffp-contract=fast -ffp-model=fast" + cxx : "-fno-math-errno -fno-trapping-math -ffp-contract=fast -ffp-model=fast" + + # Toggle noplt, requires bindnow (OFF) + - noplt: + c : "-fno-plt" + cxx : "-fno-plt" + + # Toggle -fno-semantic-interposition (OFF) + - nosemantic: + c : "-fno-semantic-interposition" + cxx : "-fno-semantic-interposition" + + # Toggle -fno-direct-access-external-data (OFF) + - nodaed: + llvm: + c : "-fno-direct-access-external-data" + cxx : "-fno-direct-access-external-data" + + # Prefer 128-bit vector width (ON) + - avxwidth-128: + c : "-mprefer-vector-width=128" + cxx : "-mprefer-vector-width=128" + + # Toggle -fpch-instantiate-templates (OFF) + - pch-instantiate: + llvm: + c : "-fpch-instantiate-templates" + cxx : "-fpch-instantiate-templates" + + # Toggle asneeded (ON) + - asneeded: + ld : "-Wl,--as-needed" + + # Toggle runpath 
(OFF) + - runpath: + ld : "-Wl,--enable-new-dtags" + + # Toggle sse2avx (OFF) + - sse2avx: + gnu: + c : "-msse2avx" + cxx : "-msse2avx" + + # Toggle visibility hidden (OFF) + - visibility-hidden: + c : "-fvisibility=hidden" + cxx : "-fvisibility-inlines-hidden -fvisibility=hidden" + + # Toggle visibility inlines hidden (OFF) + - visibility-inline: + cxx : "-fvisibility-inlines-hidden" + + # Enable relative vtables (OFF) + - relative-vtables: + llvm: + cxx : "-fexperimental-library -fexperimental-relative-c++-abi-vtables" + + # Toggle relr (ON) + - relr: + ld : "-Wl,-z,pack-relative-relocs" + +# Template packages +packages : + + # Main package + - "%(name)": + paths: + - "*" + + # Some documentation + - "%(name)-docs": + summary: "Documentation for %(name)" + description: | + Documentation files for the %(name) package + paths: + - /usr/share/gtk-doc + + # Main development subpackage + - "%(name)-devel": + summary: "Development files for %(name)" + description: | + Install this package if you intend to build software against + the %(name) package. + paths: + - /usr/include + - /usr/lib/*.a + - /usr/lib/cmake + - /usr/lib/lib*.so + - /usr/lib/pkgconfig + - /usr/share/aclocal + - /usr/share/man/man2 + - /usr/share/man/man3 + - /usr/share/man/man9 + - /usr/share/pkgconfig + rundeps: + - "%(name)" + + # Main dbginfo package + - "%(name)-dbginfo": + summary: "Debugging symbols for %(name)" + description: | + Install this package if you need debugging information + symbols + for the %(name) package. + paths: + - /usr/lib/debug + + # Template for a -libs sub-package which can be used by adding paths via the stone.yml file + - "%(name)-libs": + summary: "Library files for %(name)" + description: | + Library files for %(name), typically pulled in as a dependency of another package. 
+ + # 32-bit compat libraries + - "%(name)-32bit": + summary: "Provides 32-bit runtime libraries for %(name)" + description: | + Install this package if you need the 32-bit versions of the + %(name) package libraries. + paths: + - /usr/lib32 + - /usr/lib32/lib*.so.* + rundeps: + - "%(name)" + + # 32-bit development files + - "%(name)-32bit-devel": + summary: "Provides development files for %(name)-32bit" + description: | + Install this package if you need to build software against + the 32-bit version of %(name), %(name)-32bit. + paths: + - /usr/lib32/*.a + - /usr/lib32/cmake + - /usr/lib32/lib*.so + - /usr/lib32/pkgconfig + rundeps: + - "%(name)-32bit" + - "%(name)-devel" + + # 32-bit debug symbols + - "%(name)-32bit-dbginfo": + summary: "Debugging symbols for %(name)-32bit" + description: | + Install this package if you need debugging information + symbols + for the %(name)-32bit package. + paths: + - /usr/lib32/debug diff --git a/crates/boulder/data/macros/arch/emul32/x86_64.yml b/crates/boulder/data/macros/arch/emul32/x86_64.yml new file mode 100644 index 00000000..be4b96fd --- /dev/null +++ b/crates/boulder/data/macros/arch/emul32/x86_64.yml @@ -0,0 +1,20 @@ +# Provides -m32 builds for x86_64 build-hosts + +definitions: + + - libsuffix : "32" + - build_platform : i686-%(vendorID) + - host_platform : i686-%(vendorID) + - cc : "%(compiler_c) -m32" + - cxx : "%(compiler_cxx) -m32" + - cpp : "%(compiler_cpp) -m32" + - march : i686 + - mtune : i686 + - pkgconfigpath : "%(libdir)/pkgconfig:/usr/share/pkgconfig:%(prefix)/lib/pkgconfig" + +flags: + + # Set architecture flags + - architecture: + c : "-march=i686 -mtune=i686" + cxx : "-march=i686 -mtune=i686" diff --git a/crates/boulder/data/macros/arch/x86.yml b/crates/boulder/data/macros/arch/x86.yml new file mode 100644 index 00000000..06ef7e92 --- /dev/null +++ b/crates/boulder/data/macros/arch/x86.yml @@ -0,0 +1,19 @@ +# Provides -m32 builds for i686 build-hosts + +definitions: + + - libsuffix : "32" + - 
build_platform : i686-%(vendorID) + - host_platform : i686-%(vendorID) + - cc : "%(compiler_c) -m32" + - cxx : "%(compiler_cxx) -m32" + - cpp : "%(compiler_cpp) -m32" + - march : i686 + - mtune : i686 + +flags: + + # Set architecture flags + - architecture: + c : "-march=i686 -mtune=i686" + cxx : "-march=i686 -mtune=i686" diff --git a/crates/boulder/data/macros/arch/x86_64-stage1.yml b/crates/boulder/data/macros/arch/x86_64-stage1.yml new file mode 100644 index 00000000..a1e3b0b8 --- /dev/null +++ b/crates/boulder/data/macros/arch/x86_64-stage1.yml @@ -0,0 +1,29 @@ +# Based on x86_64 - provides the stage1 bootstrap definitions +# We force a new cross compilation step into existence with +# our "-xvendorID" +# +# This is washed out in stage2. +# We also force a "GNU/Linux" host ABI in this stage. +definitions: + + - prefix : "/usr/bootstrap-stage1" + - libsuffix : "" + - build_platform : x86_64-linux-gnu + - host_platform : x86_64-%(vendorID) + - cc : "%(compiler_c)" + - cxx : "%(compiler_cxx)" + - cpp : "%(compiler_cpp)" + - march : x86-64-v2 + - mtune : ivybridge + - bootstrap_root : /bill + +flags: + + # Set architecture flags + - architecture: + c : "-march=x86-64-v2 -mtune=ivybridge" + cxx : "-march=x86-64-v2 -mtune=ivybridge" + +defaultTuningGroups : + - base + - optimize \ No newline at end of file diff --git a/crates/boulder/data/macros/arch/x86_64.yml b/crates/boulder/data/macros/arch/x86_64.yml new file mode 100644 index 00000000..7e812f8b --- /dev/null +++ b/crates/boulder/data/macros/arch/x86_64.yml @@ -0,0 +1,19 @@ +# Provides -m64 builds for x86_64 build-hosts + +definitions: + + - libsuffix : "" + - build_platform : x86_64-%(vendorID) + - host_platform : x86_64-%(vendorID) + - cc : "%(compiler_c)" + - cxx : "%(compiler_cxx)" + - cpp : "%(compiler_cpp)" + - march : x86-64-v2 + - mtune : ivybridge + +flags: + + # Set architecture flags + - architecture: + c : "-march=x86-64-v2 -mtune=ivybridge" + cxx : "-march=x86-64-v2 -mtune=ivybridge" diff --git 
a/crates/boulder/src/architecture.rs b/crates/boulder/src/architecture.rs new file mode 100644 index 00000000..88deaebb --- /dev/null +++ b/crates/boulder/src/architecture.rs @@ -0,0 +1,66 @@ +// SPDX-FileCopyrightText: Copyright © 2020-2023 Serpent OS Developers +// +// SPDX-License-Identifier: MPL-2.0 + +use std::fmt; + +pub const fn host() -> Architecture { + #[cfg(target_arch = "x86_64")] + { + Architecture::X86_64 + } + #[cfg(target_arch = "x86")] + { + Architecture::X86 + } + #[cfg(target_arch = "aarch64")] + { + Architecture::Aarch64 + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, strum::Display)] +#[strum(serialize_all = "lowercase")] +pub enum Architecture { + X86_64, + X86, + Aarch64, +} + +impl Architecture { + pub fn supports_emul32(&self) -> bool { + match self { + Architecture::X86_64 => true, + Architecture::X86 => false, + Architecture::Aarch64 => true, + } + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub enum BuildTarget { + Native(Architecture), + Emul32(Architecture), +} + +impl BuildTarget { + pub fn emul32(&self) -> bool { + matches!(self, BuildTarget::Emul32(_)) + } + + pub fn host_architecture(&self) -> Architecture { + match self { + BuildTarget::Native(arch) => *arch, + BuildTarget::Emul32(arch) => *arch, + } + } +} + +impl fmt::Display for BuildTarget { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + BuildTarget::Native(arch) => write!(f, "{arch}"), + BuildTarget::Emul32(arch) => write!(f, "emul32/{arch}"), + } + } +} diff --git a/crates/boulder/src/builder.rs b/crates/boulder/src/builder.rs new file mode 100644 index 00000000..468d646b --- /dev/null +++ b/crates/boulder/src/builder.rs @@ -0,0 +1,260 @@ +// SPDX-FileCopyrightText: Copyright © 2020-2023 Serpent OS Developers +// +// SPDX-License-Identifier: MPL-2.0 + +use std::{ + fs, io, + os::unix::process::ExitStatusExt, + path::{Path, PathBuf}, + process, +}; + +use nix::{sys::signal::Signal, unistd::Pid}; +use 
stone_recipe::Recipe; +use thiserror::Error; +use tui::Stylize; + +use crate::{ + architecture::BuildTarget, + container::{self, ExecError}, + job::{self, Step}, + macros, paths, pgo, profile, recipe, root, upstream, util, Env, Job, Macros, Paths, Runtime, +}; + +pub struct Builder { + pub targets: Vec, + pub recipe: Recipe, + pub paths: Paths, + pub macros: Macros, + pub ccache: bool, + pub env: Env, + profile: profile::Id, +} + +pub struct Target { + pub build_target: BuildTarget, + pub jobs: Vec, +} + +impl Builder { + pub fn new( + recipe_path: &Path, + env: Env, + profile: profile::Id, + ccache: bool, + ) -> Result { + let recipe_bytes = fs::read(recipe_path)?; + let recipe = stone_recipe::from_slice(&recipe_bytes)?; + + let macros = Macros::load(&env)?; + + let paths = Paths::new( + paths::Id::new(&recipe), + recipe_path, + &env.cache_dir, + "/mason", + )?; + + let build_targets = recipe::build_targets(&recipe); + + if build_targets.is_empty() { + return Err(Error::NoBuildTargets); + } + + let targets = build_targets + .into_iter() + .map(|build_target| { + let stages = pgo::stages(&recipe, build_target) + .map(|stages| stages.into_iter().map(Some).collect::>()) + .unwrap_or_else(|| vec![None]); + + let jobs = stages + .into_iter() + .map(|stage| Job::new(build_target, stage, &recipe, &paths, ¯os, ccache)) + .collect::, _>>()?; + + Ok(Target { build_target, jobs }) + }) + .collect::, job::Error>>()?; + + Ok(Self { + targets, + recipe, + paths, + macros, + ccache, + env, + profile, + }) + } + + pub fn extra_deps(&self) -> impl Iterator { + self.targets.iter().flat_map(|target| { + target.jobs.iter().flat_map(|job| { + job.steps + .values() + .flat_map(|script| script.dependencies.iter().map(String::as_str)) + }) + }) + } + + pub fn setup(&self) -> Result<(), Error> { + root::clean(self)?; + + let rt = Runtime::new()?; + + rt.block_on(async { + let profiles = profile::Manager::new(&self.env).await; + + let repos = profiles.repositories(&self.profile)?.clone(); 
+ + root::populate(self, repos).await?; + upstream::sync(&self.recipe, &self.paths).await?; + + Ok(()) as Result<_, Error> + })?; + + Ok(()) + } + + pub fn build(self) -> Result<(), Error> { + container::exec(&self.paths, self.recipe.options.networking, || { + // We're now in the container =) + + for (i, target) in self.targets.iter().enumerate() { + if i > 0 { + println!(); + } + println!("{}", target.build_target.to_string().dim()); + + for (i, job) in target.jobs.iter().enumerate() { + let is_pgo = job.pgo_stage.is_some(); + + // Recreate work dir for each job + util::sync::recreate_dir(&job.work_dir)?; + // Ensure pgo dir exists + if is_pgo { + let pgo_dir = PathBuf::from(format!("{}-pgo", job.build_dir.display())); + util::sync::ensure_dir_exists(&pgo_dir)?; + } + + if let Some(stage) = job.pgo_stage { + if i > 0 { + println!("{}", "│".dim()); + } + println!("{}", format!("│pgo-{stage}").dim()); + } + + for (i, (step, script)) in job.steps.iter().enumerate() { + let pipes = if job.pgo_stage.is_some() { + "││".dim() + } else { + "│".dim() + }; + + if i > 0 { + println!("{pipes}"); + } + println!("{pipes}{}", step.styled(format!("{step}"))); + + let build_dir = &job.build_dir; + let work_dir = &job.work_dir; + + // TODO: Proper temp file + let script_path = "/tmp/script"; + std::fs::write(script_path, &script.content).unwrap(); + + let current_dir = if work_dir.exists() { + &work_dir + } else { + &build_dir + }; + + let mut command = logged(*step, is_pgo, "/bin/sh")? 
+ .arg(script_path) + .env_clear() + .env("HOME", build_dir) + .env("PATH", "/usr/bin:/usr/sbin") + .env("TERM", "xterm-256color") + .current_dir(current_dir) + .spawn()?; + + ::container::forward_sigint(Pid::from_raw(command.id() as i32))?; + + let result = command.wait()?; + + if !result.success() { + match result.code() { + Some(code) => { + return Err(ExecError::Code(code)); + } + None => { + if let Some(signal) = result + .signal() + .or_else(|| result.stopped_signal()) + .and_then(|i| Signal::try_from(i).ok()) + { + return Err(ExecError::Signal(signal)); + } else { + return Err(ExecError::UnknownSignal); + } + } + } + } + } + } + } + + Ok(()) + })?; + Ok(()) + } +} + +fn logged(step: Step, is_pgo: bool, command: &str) -> Result { + let out_log = log(step, is_pgo)?; + let err_log = log(step, is_pgo)?; + + let mut command = process::Command::new(command); + command + .stdout(out_log.stdin.unwrap()) + .stderr(err_log.stdin.unwrap()); + + Ok(command) +} + +// TODO: Ikey plz make look nice +fn log(step: Step, is_pgo: bool) -> Result { + let pgo = is_pgo.then_some("│").unwrap_or_default().dim(); + let kind = step.styled(format!("{}│", step.abbrev())); + let tag = format!("{}{pgo}{kind} ", "│".dim()); + + process::Command::new("awk") + .arg(format!(r#"{{ print "{tag}" $0 }}"#)) + .env("PATH", "/usr/bin:/usr/sbin") + .env("TERM", "xterm-256color") + .stdin(process::Stdio::piped()) + .spawn() +} + +#[derive(Debug, Error)] +pub enum Error { + #[error("no supported build targets for recipe")] + NoBuildTargets, + #[error("macros")] + Macros(#[from] macros::Error), + #[error("job")] + Job(#[from] job::Error), + #[error("profile")] + Profile(#[from] profile::Error), + #[error("root")] + Root(#[from] root::Error), + #[error("upstream")] + Upstream(#[from] upstream::Error), + #[error("stone recipe")] + StoneRecipe(#[from] stone_recipe::Error), + #[error("container")] + Container(#[from] container::Error), + #[error("io")] + Io(#[from] io::Error), +} diff --git 
a/crates/boulder/src/cli.rs b/crates/boulder/src/cli.rs new file mode 100644 index 00000000..fe8774ea --- /dev/null +++ b/crates/boulder/src/cli.rs @@ -0,0 +1,71 @@ +// SPDX-FileCopyrightText: Copyright © 2020-2023 Serpent OS Developers +// +// SPDX-License-Identifier: MPL-2.0 +use std::path::PathBuf; + +use boulder::{env, Env}; +use clap::{Args, Parser}; +use thiserror::Error; + +mod build; +mod chroot; +mod profile; + +#[derive(Debug, Parser)] +#[command()] +pub struct Command { + #[command(flatten)] + pub global: Global, + #[command(subcommand)] + pub subcommand: Subcommand, +} + +#[derive(Debug, Args)] +pub struct Global { + #[arg(long, global = true)] + pub cache_dir: Option, + #[arg(long, global = true)] + pub config_dir: Option, + #[arg(long, global = true)] + pub data_dir: Option, + #[arg(long, global = true)] + pub moss_root: Option, +} + +#[derive(Debug, clap::Subcommand)] +pub enum Subcommand { + Build(build::Command), + Chroot(chroot::Command), + Profile(profile::Command), +} + +pub fn process() -> Result<(), Error> { + let Command { global, subcommand } = Command::parse(); + + let env = Env::new( + global.cache_dir, + global.config_dir, + global.data_dir, + global.moss_root, + )?; + + match subcommand { + Subcommand::Build(command) => build::handle(command, env)?, + Subcommand::Chroot(command) => chroot::handle(command, env)?, + Subcommand::Profile(command) => profile::handle(command, env)?, + } + + Ok(()) +} + +#[derive(Debug, Error)] +pub enum Error { + #[error("build")] + Build(#[from] build::Error), + #[error("chroot")] + Chroot(#[from] chroot::Error), + #[error("profile")] + Profile(#[from] profile::Error), + #[error("env")] + Env(#[from] env::Error), +} diff --git a/crates/boulder/src/cli/build.rs b/crates/boulder/src/cli/build.rs new file mode 100644 index 00000000..3e4176a6 --- /dev/null +++ b/crates/boulder/src/cli/build.rs @@ -0,0 +1,69 @@ +// SPDX-FileCopyrightText: Copyright © 2020-2023 Serpent OS Developers +// +// SPDX-License-Identifier: 
MPL-2.0 + +use std::io; +use std::path::PathBuf; + +use boulder::builder; +use boulder::Builder; +use boulder::{profile, Env}; +use clap::Parser; +use thiserror::Error; + +#[derive(Debug, Parser)] +#[command(about = "Build ... TODO")] +pub struct Command { + #[arg(short, long)] + profile: profile::Id, + #[arg( + short, + long = "compiler-cache", + help = "Enable compiler caching", + default_value = "false" + )] + ccache: bool, + #[arg( + short, + long, + default_value = ".", + help = "Directory to store build results" + )] + output: PathBuf, + #[arg(default_value = "./stone.yml", help = "Path to recipe file")] + recipe: PathBuf, +} + +pub fn handle(command: Command, env: Env) -> Result<(), Error> { + let Command { + profile, + output, + recipe, + ccache, + } = command; + + if !output.exists() { + return Err(Error::MissingOutput(output)); + } + if !recipe.exists() { + return Err(Error::MissingRecipe(recipe)); + } + + let builder = Builder::new(&recipe, env, profile, ccache)?; + builder.setup()?; + builder.build()?; + + Ok(()) +} + +#[derive(Debug, Error)] +pub enum Error { + #[error("output directory does not exist: {0:?}")] + MissingOutput(PathBuf), + #[error("recipe file does not exist: {0:?}")] + MissingRecipe(PathBuf), + #[error("builder")] + Builder(#[from] builder::Error), + #[error("io")] + Io(#[from] io::Error), +} diff --git a/crates/boulder/src/cli/chroot.rs b/crates/boulder/src/cli/chroot.rs new file mode 100644 index 00000000..f0408326 --- /dev/null +++ b/crates/boulder/src/cli/chroot.rs @@ -0,0 +1,61 @@ +// SPDX-FileCopyrightText: Copyright © 2020-2023 Serpent OS Developers +// +// SPDX-License-Identifier: MPL-2.0 + +use std::{fs, io, path::PathBuf}; + +use boulder::{container, paths, Env, Paths}; +use clap::Parser; +use thiserror::Error; + +#[derive(Debug, Parser)] +#[command(about = "Chroot into the build environment")] +pub struct Command { + #[arg(default_value = "./stone.yml", help = "Path to recipe file")] + recipe: PathBuf, +} + +pub fn 
handle(command: Command, env: Env) -> Result<(), Error> { + let Command { + recipe: recipe_path, + } = command; + + if !recipe_path.exists() { + return Err(Error::MissingRecipe(recipe_path)); + } + + let recipe_bytes = fs::read(&recipe_path)?; + let recipe = stone_recipe::from_slice(&recipe_bytes)?; + + let paths = Paths::new( + paths::Id::new(&recipe), + &recipe_path, + env.cache_dir, + "/mason", + )?; + + let rootfs = paths.rootfs().host; + + // Has rootfs been setup? + if !rootfs.join("usr").exists() { + return Err(Error::MissingRootFs); + } + + container::chroot(&paths, recipe.options.networking)?; + + Ok(()) +} + +#[derive(Debug, Error)] +pub enum Error { + #[error("recipe file does not exist: {0:?}")] + MissingRecipe(PathBuf), + #[error("build root doesn't exist, make sure to run build first")] + MissingRootFs, + #[error("container")] + Container(#[from] container::Error), + #[error("stone recipe")] + StoneRecipe(#[from] stone_recipe::Error), + #[error("io")] + Io(#[from] io::Error), +} diff --git a/crates/boulder/src/cli/profile.rs b/crates/boulder/src/cli/profile.rs new file mode 100644 index 00000000..940ea0da --- /dev/null +++ b/crates/boulder/src/cli/profile.rs @@ -0,0 +1,158 @@ +// SPDX-FileCopyrightText: Copyright © 2020-2023 Serpent OS Developers +// +// SPDX-License-Identifier: MPL-2.0 + +use std::{collections::HashMap, io}; + +use boulder::{profile, Env, Profile, Runtime}; +use clap::Parser; +use itertools::Itertools; +use moss::{repository, Repository}; +use thiserror::Error; +use url::Url; + +#[derive(Debug, Parser)] +#[command(about = "Manage boulder profiles")] +pub struct Command { + #[command(subcommand)] + subcommand: Subcommand, +} + +#[derive(Debug, clap::Subcommand)] +pub enum Subcommand { + #[command(about = "List profiles")] + List, + #[command(about = "Add a new profile")] + Add { + #[arg(help = "profile name")] + name: String, + #[arg( + short = 'r', + long = "repo", + required = true, + help = "profile repositories", + value_parser = 
parse_repository, + help = "repository to add to profile, can be passed multiple times", + long_help = "repository to add to profile\n\nExample: --repo name=volatile,uri=https://dev.serpentos.com/volatile/x86_64/stone.index,priority=100" + )] + repos: Vec<(repository::Id, Repository)>, + }, + #[command(about = "Update a profiles repositories")] + Update { + #[arg(short, long)] + profile: profile::Id, + }, +} + +/// Parse a single key-value pair +fn parse_repository(s: &str) -> Result<(repository::Id, Repository), String> { + let key_values = s + .split(',') + .filter_map(|kv| kv.split_once('=')) + .collect::>(); + + let id = repository::Id::new(key_values.get("name").ok_or("missing name")?.to_string()); + let uri = key_values + .get("uri") + .ok_or("missing uri")? + .parse::() + .map_err(|e| e.to_string())?; + let priority = key_values + .get("priority") + .map(|p| p.parse::()) + .transpose() + .map_err(|e| e.to_string())? + .unwrap_or_default(); + + Ok(( + id, + Repository { + description: String::default(), + uri, + priority: repository::Priority::new(priority), + }, + )) +} + +pub fn handle(command: Command, env: Env) -> Result<(), Error> { + let rt = Runtime::new()?; + let manager = rt.block_on(profile::Manager::new(&env)); + + match command.subcommand { + Subcommand::List => list(manager), + Subcommand::Add { name, repos } => rt.block_on(add(&env, manager, name, repos)), + Subcommand::Update { profile } => rt.block_on(update(&env, manager, &profile)), + } +} + +pub fn list(manager: profile::Manager) -> Result<(), Error> { + if manager.profiles.is_empty() { + println!("No profiles have been configured yet"); + return Ok(()); + } + + for (id, profile) in manager.profiles.iter() { + println!("{id}:"); + + for (id, repo) in profile + .collections + .iter() + .sorted_by(|(_, a), (_, b)| a.priority.cmp(&b.priority).reverse()) + { + println!(" - {} = {} [{}]", id, repo.uri, repo.priority); + } + } + + Ok(()) +} + +pub async fn add<'a>( + env: &'a Env, + mut manager: 
profile::Manager<'a>, + name: String, + repos: Vec<(repository::Id, Repository)>, +) -> Result<(), Error> { + let id = profile::Id::new(name); + + manager + .save_profile( + id.clone(), + Profile { + collections: repository::Map::with(repos), + }, + ) + .await?; + + update(env, manager, &id).await?; + + println!("Profile \"{id}\" has been added"); + + Ok(()) +} + +pub async fn update<'a>( + env: &'a Env, + manager: profile::Manager<'a>, + profile: &profile::Id, +) -> Result<(), Error> { + let repos = manager.repositories(profile)?.clone(); + + let mut moss_client = moss::Client::new("boulder", &env.moss_dir) + .await? + .explicit_repositories(repos) + .await?; + moss_client.refresh_repositories().await?; + + Ok(()) +} +#[derive(Debug, Error)] +pub enum Error { + #[error("config")] + Config(#[from] config::SaveError), + #[error("profile")] + Profile(#[from] profile::Error), + #[error("moss client")] + MossClient(#[from] moss::client::Error), + #[error("io")] + Io(#[from] io::Error), +} diff --git a/crates/boulder/src/container.rs b/crates/boulder/src/container.rs new file mode 100644 index 00000000..ac4e1e61 --- /dev/null +++ b/crates/boulder/src/container.rs @@ -0,0 +1,84 @@ +// SPDX-FileCopyrightText: Copyright © 2020-2023 Serpent OS Developers +// +// SPDX-License-Identifier: MPL-2.0 + +use std::{io, process}; + +use container::Container; +use nix::sys::signal::Signal; +use thiserror::Error; + +use crate::Paths; + +pub fn exec( + paths: &Paths, + networking: bool, + f: impl FnMut() -> Result<(), ExecError>, +) -> Result<(), Error> { + run(paths, networking, f) +} + +pub fn chroot(paths: &Paths, networking: bool) -> Result<(), Error> { + let home = &paths.build().guest; + + run(paths, networking, || { + let mut child = process::Command::new("/bin/bash") + .arg("--login") + .env_clear() + .env("HOME", home) + .env("PATH", "/usr/bin:/usr/sbin") + .env("TERM", "xterm-256color") + .spawn()?; + + child.wait()?; + + Ok(()) + }) +} + +fn run( + paths: &Paths, + 
networking: bool, + f: impl FnMut() -> Result<(), ExecError>, +) -> Result<(), Error> { + let rootfs = paths.rootfs().host; + let artefacts = paths.artefacts(); + let build = paths.build(); + let compiler = paths.ccache(); + let recipe = paths.recipe(); + + Container::new(rootfs) + .hostname("boulder") + .networking(networking) + .ignore_host_sigint(true) + .work_dir(&build.guest) + .bind_rw(&artefacts.host, &artefacts.guest) + .bind_rw(&build.host, &build.guest) + .bind_rw(&compiler.host, &compiler.guest) + .bind_ro(&recipe.host, &recipe.guest) + .run::(f)?; + + Ok(()) +} + +#[derive(Debug, Error)] +pub enum Error { + #[error(transparent)] + Container(#[from] container::Error), + #[error("io")] + Io(#[from] io::Error), +} + +#[derive(Debug, Error)] +pub enum ExecError { + #[error("failed with status code {0}")] + Code(i32), + #[error("stopped by signal {}", .0.as_str())] + Signal(Signal), + #[error("stopped by unknown signal")] + UnknownSignal, + #[error(transparent)] + Nix(#[from] nix::Error), + #[error(transparent)] + Io(#[from] io::Error), +} diff --git a/crates/boulder/src/dependency.rs b/crates/boulder/src/dependency.rs new file mode 100644 index 00000000..99bd45ee --- /dev/null +++ b/crates/boulder/src/dependency.rs @@ -0,0 +1,108 @@ +// SPDX-FileCopyrightText: Copyright © 2020-2023 Serpent OS Developers +// +// SPDX-License-Identifier: MPL-2.0 + +use std::collections::HashSet; + +use stone_recipe::{tuning::Toolchain, Upstream}; + +use crate::Builder; + +pub fn calculate(builder: &Builder) -> Vec<&str> { + let mut packages = BASE_PACKAGES.to_vec(); + + match builder.recipe.options.toolchain { + Toolchain::Llvm => packages.extend(LLVM_PACKAGES), + Toolchain::Gnu => packages.extend(GNU_PACKAGES), + } + + if builder.recipe.emul32 { + packages.extend(BASE32_PACKAGES); + + match builder.recipe.options.toolchain { + Toolchain::Llvm => packages.extend(LLVM32_PACKAGES), + Toolchain::Gnu => packages.extend(GNU32_PACKAGES), + } + } + + if builder.ccache { + 
packages.push(CCACHE_PACKAGE); + } + + packages.extend(builder.recipe.build.build_deps.iter().map(String::as_str)); + packages.extend(builder.recipe.build.check_deps.iter().map(String::as_str)); + + for upstream in &builder.recipe.upstreams { + if let Upstream::Plain { uri, .. } = upstream { + let path = uri.path(); + + if let Some((_, ext)) = path.rsplit_once('.') { + match ext { + "xz" => { + packages.extend(["binary(tar)", "binary(xz)"]); + } + "zst" => { + packages.extend(["binary(tar)", "binary(zstd)"]); + } + "bz2" => { + packages.extend(["binary(tar)", "binary(bzip2)"]); + } + "gz" => { + packages.extend(["binary(tar)", "binary(gzip)"]); + } + "zip" => { + packages.push("binary(unzip)"); + } + "rpm" => { + packages.extend(["binary(rpm2cpio)", "cpio"]); + } + "deb" => { + packages.push("binary(ar)"); + } + _ => {} + } + } + } + } + + // Dependencies from all scripts in the builder + let extra_deps = builder.extra_deps(); + + packages + .into_iter() + .chain(extra_deps) + // Remove dupes + .collect::>() + .into_iter() + .collect() +} + +const BASE_PACKAGES: &[&str] = &[ + "bash", + "boulder", + "coreutils", + "dash", + "diffutils", + "findutils", + "gawk", + "glibc-devel", + "grep", + "libarchive", + "linux-headers", + "pkgconf", + "sed", + "util-linux", + // Needed for chroot + "binary(git)", + "binary(nano)", + "binary(vim)", +]; +const BASE32_PACKAGES: &[&str] = &["glibc-32bit-devel"]; + +const GNU_PACKAGES: &[&str] = &["binutils", "gcc-devel"]; +const GNU32_PACKAGES: &[&str] = &["gcc-32bit-devel"]; + +const LLVM_PACKAGES: &[&str] = &["clang"]; +const LLVM32_PACKAGES: &[&str] = &["clang-32bit", "libcxx-32bit-devel"]; + +const CCACHE_PACKAGE: &str = "binary(ccache)"; diff --git a/crates/boulder/src/env.rs b/crates/boulder/src/env.rs new file mode 100644 index 00000000..b4efb732 --- /dev/null +++ b/crates/boulder/src/env.rs @@ -0,0 +1,90 @@ +// SPDX-FileCopyrightText: Copyright © 2020-2023 Serpent OS Developers +// +// SPDX-License-Identifier: MPL-2.0 + +use 
std::{io, path::PathBuf}; + +use thiserror::Error; + +use crate::util; + +pub struct Env { + pub cache_dir: PathBuf, + pub data_dir: PathBuf, + pub moss_dir: PathBuf, + pub config: config::Manager, +} + +impl Env { + pub fn new( + cache_dir: Option, + config_dir: Option, + data_dir: Option, + moss_root: Option, + ) -> Result { + let is_root = util::is_root(); + + let config = if let Some(dir) = config_dir { + config::Manager::custom(dir) + } else if is_root { + config::Manager::system("/", "boulder") + } else { + config::Manager::user("boulder")? + }; + + let cache_dir = resolve_cache_dir(is_root, cache_dir)?; + let data_dir = resolve_data_dir(data_dir); + let moss_dir = resolve_moss_root(is_root, moss_root)?; + + util::sync::ensure_dir_exists(&cache_dir)?; + util::sync::ensure_dir_exists(&data_dir)?; + util::sync::ensure_dir_exists(&moss_dir)?; + + Ok(Self { + config, + cache_dir, + data_dir, + moss_dir, + }) + } +} + +fn resolve_cache_dir(is_root: bool, custom: Option) -> Result { + if let Some(dir) = custom { + Ok(dir) + } else if is_root { + Ok(PathBuf::from("/var/cache/boulder")) + } else { + Ok(dirs::cache_dir().ok_or(Error::UserCache)?.join("boulder")) + } +} + +fn resolve_data_dir(custom: Option) -> PathBuf { + custom.unwrap_or_else(|| "/usr/share/boulder".into()) +} + +fn resolve_moss_root(is_root: bool, custom: Option) -> Result { + if let Some(dir) = custom { + Ok(dir) + } else if is_root { + Ok(PathBuf::from("/")) + } else { + Ok(dirs::cache_dir().ok_or(Error::UserCache)?.join("moss")) + } +} + +#[derive(Debug, Error)] +pub enum Error { + #[error("cannot find cache dir, $XDG_CACHE_HOME or $HOME env not set")] + UserCache, + #[error("cannot find config dir, $XDG_CONFIG_HOME or $HOME env not set")] + UserConfig, + #[error("io")] + Io(#[from] io::Error), +} + +impl From for Error { + fn from(_: config::CreateUserError) -> Self { + Error::UserConfig + } +} diff --git a/crates/boulder/src/job.rs b/crates/boulder/src/job.rs new file mode 100644 index 
00000000..6b916983 --- /dev/null +++ b/crates/boulder/src/job.rs @@ -0,0 +1,109 @@ +// SPDX-FileCopyrightText: Copyright © 2020-2023 Serpent OS Developers +// +// SPDX-License-Identifier: MPL-2.0 + +use std::{ + collections::BTreeMap, + io, + path::{Path, PathBuf}, +}; + +use stone_recipe::{script, tuning, Recipe, Script, Upstream}; +use thiserror::Error; + +pub use self::step::Step; +use crate::{architecture::BuildTarget, pgo, util, Macros, Paths}; + +mod step; + +#[derive(Debug)] +pub struct Job { + pub target: BuildTarget, + pub pgo_stage: Option, + pub steps: BTreeMap, + pub work_dir: PathBuf, + pub build_dir: PathBuf, +} + +impl Job { + pub fn new( + target: BuildTarget, + pgo_stage: Option, + recipe: &Recipe, + paths: &Paths, + macros: &Macros, + ccache: bool, + ) -> Result { + let build_dir = paths.build().guest.join(target.to_string()); + let work_dir = work_dir(&build_dir, &recipe.upstreams); + + let steps = step::list(pgo_stage) + .into_iter() + .filter_map(|step| { + let result = step + .script(target, pgo_stage, recipe, paths, macros, ccache) + .transpose()?; + Some(result.map(|script| (step, script))) + }) + .collect::>()?; + + Ok(Self { + target, + pgo_stage, + steps, + work_dir, + build_dir, + }) + } +} + +fn work_dir(build_dir: &Path, upstreams: &[Upstream]) -> PathBuf { + let mut work_dir = build_dir.to_path_buf(); + + // Work dir is the first upstream that should be unpacked + if let Some(upstream) = upstreams.iter().find(|upstream| match upstream { + Upstream::Plain { unpack, .. } => *unpack, + Upstream::Git { .. } => true, + }) { + match upstream { + Upstream::Plain { + uri, + rename, + unpack_dir, + .. + } => { + let file_name = util::uri_file_name(uri); + let rename = rename.as_deref().unwrap_or(file_name); + let unpack_dir = unpack_dir + .as_ref() + .map(|dir| dir.display().to_string()) + .unwrap_or_else(|| rename.to_string()); + + work_dir = build_dir.join(unpack_dir); + } + Upstream::Git { uri, clone_dir, .. 
} => { + let source = util::uri_file_name(uri); + let target = clone_dir + .as_ref() + .map(|dir| dir.display().to_string()) + .unwrap_or_else(|| source.to_string()); + + work_dir = build_dir.join(target); + } + } + } + + work_dir +} + +#[derive(Debug, Error)] +pub enum Error { + #[error("missing arch macros: {0}")] + MissingArchMacros(String), + #[error("script")] + Script(#[from] script::Error), + #[error("tuning")] + Tuning(#[from] tuning::Error), + #[error("io")] + Io(#[from] io::Error), +} diff --git a/crates/boulder/src/job/step.rs b/crates/boulder/src/job/step.rs new file mode 100644 index 00000000..c6bb3e95 --- /dev/null +++ b/crates/boulder/src/job/step.rs @@ -0,0 +1,374 @@ +// SPDX-FileCopyrightText: Copyright © 2020-2023 Serpent OS Developers +// +// SPDX-License-Identifier: MPL-2.0 + +use std::collections::HashSet; + +use itertools::Itertools; +use stone_recipe::{ + script, + tuning::{self, Toolchain}, + Recipe, Script, +}; + +use tui::Stylize; + +use super::{work_dir, Error}; +use crate::{architecture::BuildTarget, pgo, recipe, util, Macros, Paths}; + +pub fn list(pgo_stage: Option) -> Vec { + if matches!(pgo_stage, Some(pgo::Stage::One | pgo::Stage::Two)) { + Step::WORKLOAD.to_vec() + } else { + Step::NORMAL.to_vec() + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, strum::Display)] +pub enum Step { + Prepare, + Setup, + Build, + Install, + Check, + Workload, +} + +impl Step { + const NORMAL: &'static [Self] = &[ + Step::Prepare, + Step::Setup, + Step::Build, + Step::Install, + Step::Check, + ]; + const WORKLOAD: &'static [Self] = &[Step::Prepare, Step::Setup, Step::Build, Step::Workload]; + + pub fn abbrev(&self) -> &str { + match self { + Step::Prepare => "P", + Step::Setup => "S", + Step::Build => "B", + Step::Install => "I", + Step::Check => "C", + Step::Workload => "W", + } + } + + pub fn styled(&self, s: impl ToString) -> String { + let s = s.to_string(); + // Taste the rainbow + // TODO: Ikey plz make pretty + match 
self { + Step::Prepare => s.grey(), + Step::Setup => s.cyan(), + Step::Build => s.blue(), + Step::Check => s.yellow(), + Step::Install => s.green(), + Step::Workload => s.magenta(), + } + .dim() + .to_string() + } + + pub fn script( + &self, + target: BuildTarget, + pgo_stage: Option, + recipe: &Recipe, + paths: &Paths, + macros: &Macros, + ccache: bool, + ) -> Result, Error> { + let build = recipe::build_target_definition(recipe, target); + + let Some(mut content) = (match self { + Step::Prepare => Some(prepare_script(&recipe.upstreams)), + Step::Setup => build.setup.clone(), + Step::Build => build.build.clone(), + Step::Check => build.check.clone(), + Step::Install => build.install.clone(), + Step::Workload => match build.workload.clone() { + Some(mut content) => { + if matches!(recipe.options.toolchain, Toolchain::Llvm) { + if matches!(pgo_stage, Some(pgo::Stage::One)) { + content.push_str("%llvm_merge_s1"); + } else if matches!(pgo_stage, Some(pgo::Stage::Two)) { + content.push_str("%llvm_merge_s2"); + } + } + + Some(content) + } + None => None, + }, + }) else { + return Ok(None); + }; + + if content.is_empty() { + return Ok(None); + } + + if let Some(env) = build.environment.as_deref() { + if env != "(null)" && !env.is_empty() && !matches!(self, Step::Prepare) { + content = format!("{env} {content}"); + } + } + + content = format!("%scriptBase\n{content}"); + + let mut parser = script::Parser::new(); + + let build_target = target.to_string(); + let build_dir = paths.build().guest.join(&build_target); + let work_dir = if matches!(self, Step::Prepare) { + build_dir.clone() + } else { + work_dir(&build_dir, &recipe.upstreams) + }; + let num_jobs = util::num_cpus(); + + for arch in ["base", &build_target] { + let macros = macros + .arch + .get(arch) + .cloned() + .ok_or_else(|| Error::MissingArchMacros(arch.to_string()))?; + + parser.add_macros(macros.clone()); + } + + for macros in macros.actions.clone() { + parser.add_macros(macros.clone()); + } + + 
parser.add_definition("name", &recipe.source.name); + parser.add_definition("version", &recipe.source.version); + parser.add_definition("release", recipe.source.release); + parser.add_definition("jobs", num_jobs); + parser.add_definition("pkgdir", paths.recipe().guest.join("pkg").display()); + parser.add_definition("sourcedir", paths.upstreams().guest.display()); + parser.add_definition("installroot", paths.install().guest.display()); + parser.add_definition("buildroot", build_dir.display()); + parser.add_definition("workdir", work_dir.display()); + + // TODO: Remaining definitions & tune flags + parser.add_definition("cflags", ""); + parser.add_definition("cxxflags", ""); + parser.add_definition("ldflags", ""); + + parser.add_definition("compiler_cache", "/mason/ccache"); + + let path = if ccache { + "/usr/lib/ccache/bin:/usr/bin:/bin" + } else { + "/usr/bin:/bin" + }; + + /* Set the relevant compilers */ + if matches!(recipe.options.toolchain, Toolchain::Llvm) { + parser.add_definition("compiler_c", "clang"); + parser.add_definition("compiler_cxx", "clang++"); + parser.add_definition("compiler_objc", "clang"); + parser.add_definition("compiler_objcxx", "clang++"); + parser.add_definition("compiler_cpp", "clang -E -"); + parser.add_definition("compiler_objcpp", "clang -E -"); + parser.add_definition("compiler_objcxxcpp", "clang++ -E"); + parser.add_definition("compiler_ar", "llvm-ar"); + parser.add_definition("compiler_ld", "ld.lld"); + parser.add_definition("compiler_objcopy", "llvm-objcopy"); + parser.add_definition("compiler_nm", "llvm-nm"); + parser.add_definition("compiler_ranlib", "llvm-ranlib"); + parser.add_definition("compiler_strip", "llvm-strip"); + parser.add_definition("compiler_path", path); + } else { + parser.add_definition("compiler_c", "gcc"); + parser.add_definition("compiler_cxx", "g++"); + parser.add_definition("compiler_objc", "gcc"); + parser.add_definition("compiler_objcxx", "g++"); + parser.add_definition("compiler_cpp", "gcc -E"); + 
parser.add_definition("compiler_objcpp", "gcc -E"); + parser.add_definition("compiler_objcxxcpp", "g++ -E"); + parser.add_definition("compiler_ar", "gcc-ar"); + parser.add_definition("compiler_ld", "ld.bfd"); + parser.add_definition("compiler_objcopy", "objcopy"); + parser.add_definition("compiler_nm", "gcc-nm"); + parser.add_definition("compiler_ranlib", "gcc-ranlib"); + parser.add_definition("compiler_strip", "strip"); + parser.add_definition("compiler_path", path); + } + + parser.add_definition("pgo_dir", format!("{}-pgo", build_dir.display())); + + add_tuning(target, pgo_stage, recipe, macros, &mut parser)?; + + Ok(Some(parser.parse(&content)?)) + } +} + +fn prepare_script(upstreams: &[stone_recipe::Upstream]) -> String { + use std::fmt::Write; + + let mut content = String::default(); + + for upstream in upstreams { + match upstream { + stone_recipe::Upstream::Plain { + uri, + rename, + strip_dirs, + unpack, + unpack_dir, + .. + } => { + if !*unpack { + continue; + } + let file_name = util::uri_file_name(uri); + let rename = rename.as_deref().unwrap_or(file_name); + let unpack_dir = unpack_dir + .as_ref() + .map(|dir| dir.display().to_string()) + .unwrap_or_else(|| rename.to_string()); + let strip_dirs = strip_dirs.unwrap_or(1); + + let _ = writeln!(&mut content, "mkdir -p {unpack_dir}"); + if rename.ends_with(".zip") { + let _ = writeln!( + &mut content, + r#"unzip -d "{unpack_dir}" "%(sourcedir)/{rename}" || (echo "Failed to extract arcive"; exit 1);"#, + ); + } else { + let _ = writeln!( + &mut content, + r#"tar xf "%(sourcedir)/{rename}" -C "{unpack_dir}" --strip-components={strip_dirs} --no-same-owner || (echo "Failed to extract arcive"; exit 1);"#, + ); + } + } + stone_recipe::Upstream::Git { uri, clone_dir, .. 
} => { + let source = util::uri_file_name(uri); + let target = clone_dir + .as_ref() + .map(|dir| dir.display().to_string()) + .unwrap_or_else(|| source.to_string()); + + let _ = writeln!(&mut content, "mkdir -p {target}"); + let _ = writeln!( + &mut content, + r#"cp -Ra --no-preserve=ownership "%(sourcedir)/{source}/." "{target}""#, + ); + } + } + } + + content +} + +fn add_tuning( + target: BuildTarget, + pgo_stage: Option, + recipe: &Recipe, + macros: &Macros, + parser: &mut script::Parser, +) -> Result<(), Error> { + let mut tuning = tuning::Builder::new(); + + let build_target = target.to_string(); + + for arch in ["base", &build_target] { + let macros = macros + .arch + .get(arch) + .cloned() + .ok_or_else(|| Error::MissingArchMacros(arch.to_string()))?; + + tuning.add_macros(macros); + } + + for macros in macros.actions.clone() { + tuning.add_macros(macros); + } + + tuning.enable("architecture", None)?; + + for kv in &recipe.tuning { + match &kv.value { + stone_recipe::Tuning::Enable => tuning.enable(&kv.key, None)?, + stone_recipe::Tuning::Disable => tuning.disable(&kv.key)?, + stone_recipe::Tuning::Config(config) => tuning.enable(&kv.key, Some(config.clone()))?, + } + } + + // Add defaults that aren't already in recipe + for group in default_tuning_groups(target, macros) { + if !recipe.tuning.iter().any(|kv| &kv.key == group) { + tuning.enable(group, None)?; + } + } + + if let Some(stage) = pgo_stage { + match stage { + pgo::Stage::One => tuning.enable("pgostage1", None)?, + pgo::Stage::Two => tuning.enable("pgostage2", None)?, + pgo::Stage::Use => { + tuning.enable("pgouse", None)?; + if recipe.options.samplepgo { + tuning.enable("pgosample", None)?; + } + } + } + } + + fn fmt_flags<'a>(flags: impl Iterator) -> String { + flags + .map(|s| s.trim()) + .filter(|s| s.len() > 1) + .collect::>() + .into_iter() + .join(" ") + } + + let toolchain = recipe.options.toolchain; + let flags = tuning.build()?; + + let cflags = fmt_flags( + flags + .iter() + 
.filter_map(|flag| flag.get(tuning::CompilerFlag::C, toolchain)), + ); + let cxxflags = fmt_flags( + flags + .iter() + .filter_map(|flag| flag.get(tuning::CompilerFlag::Cxx, toolchain)), + ); + let ldflags = fmt_flags( + flags + .iter() + .filter_map(|flag| flag.get(tuning::CompilerFlag::Ld, toolchain)), + ); + + parser.add_definition("cflags", cflags); + parser.add_definition("cxxflags", cxxflags); + parser.add_definition("ldflags", ldflags); + + Ok(()) +} + +fn default_tuning_groups(target: BuildTarget, macros: &Macros) -> &[String] { + let build_target = target.to_string(); + + for arch in [&build_target, "base"] { + let Some(arch_macros) = macros.arch.get(arch) else { + continue; + }; + + if arch_macros.default_tuning_groups.is_empty() { + continue; + } + + return &arch_macros.default_tuning_groups; + } + + &[] +} diff --git a/crates/boulder/src/lib.rs b/crates/boulder/src/lib.rs new file mode 100644 index 00000000..9f68f651 --- /dev/null +++ b/crates/boulder/src/lib.rs @@ -0,0 +1,27 @@ +// SPDX-FileCopyrightText: Copyright © 2020-2023 Serpent OS Developers +// +// SPDX-License-Identifier: MPL-2.0 +pub use self::architecture::Architecture; +pub use self::builder::Builder; +pub use self::env::Env; +pub use self::job::Job; +pub use self::macros::Macros; +pub use self::paths::Paths; +pub use self::profile::Profile; +pub use self::runtime::Runtime; + +pub mod architecture; +pub mod builder; +pub mod container; +mod dependency; +pub mod env; +pub mod job; +mod macros; +pub mod paths; +pub mod pgo; +pub mod profile; +mod recipe; +pub mod root; +mod runtime; +pub mod upstream; +pub mod util; diff --git a/crates/boulder/src/macros.rs b/crates/boulder/src/macros.rs new file mode 100644 index 00000000..94f1d31d --- /dev/null +++ b/crates/boulder/src/macros.rs @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: Copyright © 2020-2023 Serpent OS Developers +// +// SPDX-License-Identifier: MPL-2.0 + +use std::{collections::HashMap, fs, io, path::Path}; + +use thiserror::Error; + 
+use crate::{util, Env}; + +#[derive(Debug)] +pub struct Macros { + pub arch: HashMap, + pub actions: Vec, +} + +impl Macros { + pub fn load(env: &Env) -> Result { + let macros_dir = env.data_dir.join("macros"); + let actions_dir = macros_dir.join("actions"); + let arch_dir = macros_dir.join("arch"); + + let matcher = |p: &Path| p.extension().and_then(|s| s.to_str()) == Some("yml"); + + let arch_files = + util::sync::enumerate_files(&arch_dir, matcher).map_err(Error::ArchFiles)?; + let action_files = + util::sync::enumerate_files(&actions_dir, matcher).map_err(Error::ActionFiles)?; + + let mut arch = HashMap::new(); + let mut actions = vec![]; + + for file in arch_files { + let relative = file + .strip_prefix(&arch_dir) + .unwrap_or_else(|_| unreachable!()); + + let identifier = relative.with_extension("").display().to_string(); + + let bytes = fs::read(&file)?; + let macros = stone_recipe::macros::from_slice(&bytes)?; + + arch.insert(identifier, macros); + } + + for file in action_files { + let bytes = fs::read(&file)?; + let macros = stone_recipe::macros::from_slice(&bytes)?; + + actions.push(macros); + } + + Ok(Self { arch, actions }) + } +} + +#[derive(Debug, Error)] +pub enum Error { + #[error("loading macros from arch data dir")] + ArchFiles(#[source] io::Error), + #[error("loading macros from actions data dir")] + ActionFiles(#[source] io::Error), + #[error("deserialize macros file")] + Deserialize(#[from] stone_recipe::Error), + #[error("io")] + Io(#[from] io::Error), +} diff --git a/crates/boulder/src/main.rs b/crates/boulder/src/main.rs index 2164065b..a69a7532 100644 --- a/crates/boulder/src/main.rs +++ b/crates/boulder/src/main.rs @@ -1,80 +1,32 @@ -use std::{path::PathBuf, process::Command}; +// SPDX-FileCopyrightText: Copyright © 2020-2023 Serpent OS Developers +// +// SPDX-License-Identifier: MPL-2.0 -use clap::Parser; -use tokio::fs::{create_dir, remove_dir_all}; +use std::error::Error; -#[tokio::main] -async fn main() -> Result<(), Box> { - let 
args = Args::parse(); +use tui::Stylize; - if args.chroot.exists() { - remove_dir_all(&args.chroot).await?; - } - create_dir(&args.chroot).await?; - - let mut client = moss::Client::new(&args.root) - .await? - .ephemeral(&args.chroot)?; - - client.install(BASE_PACKAGES, true).await?; - - container::run(args.chroot, move || { - let mut child = Command::new("/bin/bash") - .arg("--login") - .env_clear() - .env("HOME", "/root") - .env("PATH", "/usr/bin:/usr/sbin") - .env("TERM", "xterm-256color") - .spawn()?; - - child.wait()?; +mod cli; - Ok(()) - })?; - - Ok(()) +fn main() { + if let Err(error) = cli::process() { + report_error(error); + std::process::exit(1); + } } -#[derive(Debug, Parser)] -#[command()] -struct Args { - #[arg(short = 'D', long = "directory", global = true, default_value = "/")] - root: PathBuf, - chroot: PathBuf, +fn report_error(error: cli::Error) { + let sources = sources(&error); + let error = sources.join(": "); + eprintln!("{}: {error}", "Error".red()); } -const BASE_PACKAGES: &[&str] = &[ - "bash", - "boulder", - "coreutils", - "dash", - "dbus", - "dbus-broker", - "file", - "gawk", - "git", - "grep", - "gzip", - "inetutils", - "iproute2", - "less", - "linux-kvm", - "moss", - "moss-container", - "nano", - "neofetch", - "nss", - "openssh", - "procps", - "python", - "screen", - "sed", - "shadow", - "sudo", - "systemd", - "unzip", - "util-linux", - "vim", - "wget", - "which", -]; +fn sources(error: &cli::Error) -> Vec { + let mut sources = vec![error.to_string()]; + let mut source = error.source(); + while let Some(error) = source.take() { + sources.push(error.to_string()); + source = error.source(); + } + sources +} diff --git a/crates/boulder/src/paths.rs b/crates/boulder/src/paths.rs new file mode 100644 index 00000000..6877a635 --- /dev/null +++ b/crates/boulder/src/paths.rs @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: Copyright © 2020-2023 Serpent OS Developers +// +// SPDX-License-Identifier: MPL-2.0 + +use std::{ + io, + path::{Path, 
PathBuf}, +}; + +use stone_recipe::Recipe; + +use crate::util; + +#[derive(Debug, Clone)] +pub struct Id(String); + +impl Id { + pub fn new(recipe: &Recipe) -> Self { + Self(format!( + "{}-{}-{}", + recipe.source.name, recipe.source.version, recipe.source.release + )) + } +} + +#[derive(Debug)] +pub struct Paths { + id: Id, + host_root: PathBuf, + guest_root: PathBuf, + recipe_dir: PathBuf, +} + +impl Paths { + pub fn new( + id: Id, + recipe_path: &Path, + host_root: impl Into, + guest_root: impl Into, + ) -> io::Result { + let recipe_dir = recipe_path + .parent() + .unwrap_or(&PathBuf::default()) + .canonicalize()?; + + let job = Self { + id, + host_root: host_root.into().canonicalize()?, + guest_root: guest_root.into(), + recipe_dir, + }; + + util::sync::ensure_dir_exists(&job.rootfs().host)?; + util::sync::ensure_dir_exists(&job.artefacts().host)?; + util::sync::ensure_dir_exists(&job.build().host)?; + util::sync::ensure_dir_exists(&job.ccache().host)?; + util::sync::ensure_dir_exists(&job.upstreams().host)?; + + Ok(job) + } + + pub fn rootfs(&self) -> Mapping { + Mapping { + host: self.host_root.join("root").join(&self.id.0), + guest: "/".into(), + } + } + + pub fn artefacts(&self) -> Mapping { + Mapping { + host: self.host_root.join("artefacts").join(&self.id.0), + guest: self.guest_root.join("artefacts"), + } + } + + pub fn build(&self) -> Mapping { + Mapping { + host: self.host_root.join("build").join(&self.id.0), + guest: self.guest_root.join("build"), + } + } + + pub fn ccache(&self) -> Mapping { + Mapping { + host: self.host_root.join("ccache"), + guest: self.guest_root.join("ccache"), + } + } + + pub fn upstreams(&self) -> Mapping { + Mapping { + host: self.host_root.join("upstreams"), + guest: self.guest_root.join("sourcedir"), + } + } + + pub fn recipe(&self) -> Mapping { + Mapping { + host: self.recipe_dir.clone(), + guest: self.guest_root.join("recipe"), + } + } + + pub fn install(&self) -> Mapping { + Mapping { + // TODO: Shitty impossible state, 
this folder + // doesn't exist on host + host: "".into(), + guest: self.guest_root.join("install"), + } + } + + /// For the provided [`Mapping`], return the guest + /// path as it lives on the host fs + /// + /// Example: + /// - host = "/var/cache/boulder/root/test" + /// - guest = "/mason/build" + /// - guest_host_path = "/var/cache/boulder/root/test/mason/build" + pub fn guest_host_path(&self, mapping: &Mapping) -> PathBuf { + let relative = mapping.guest.strip_prefix("/").unwrap_or(&mapping.guest); + + self.rootfs().host.join(relative) + } +} + +pub struct Mapping { + pub host: PathBuf, + pub guest: PathBuf, +} diff --git a/crates/boulder/src/pgo.rs b/crates/boulder/src/pgo.rs new file mode 100644 index 00000000..fe709bd3 --- /dev/null +++ b/crates/boulder/src/pgo.rs @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: Copyright © 2020-2023 Serpent OS Developers +// +// SPDX-License-Identifier: MPL-2.0 + +use stone_recipe::{tuning::Toolchain, Recipe}; + +use crate::architecture::BuildTarget; +use crate::recipe; + +pub fn stages(recipe: &Recipe, target: BuildTarget) -> Option> { + let build = recipe::build_target_definition(recipe, target); + + build.workload.is_some().then(|| { + let mut stages = vec![Stage::One]; + + if matches!(recipe.options.toolchain, Toolchain::Llvm) && recipe.options.cspgo { + stages.push(Stage::Two); + } + + stages.push(Stage::Use); + + stages + }) +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, strum::Display)] +pub enum Stage { + #[strum(serialize = "stage1")] + One, + #[strum(serialize = "stage1")] + Two, + #[strum(serialize = "use")] + Use, +} diff --git a/crates/boulder/src/profile.rs b/crates/boulder/src/profile.rs new file mode 100644 index 00000000..23180485 --- /dev/null +++ b/crates/boulder/src/profile.rs @@ -0,0 +1,130 @@ +// SPDX-FileCopyrightText: Copyright © 2020-2023 Serpent OS Developers +// +// SPDX-License-Identifier: MPL-2.0 + +use std::{collections::HashMap, fmt}; + +use config::Config; +use 
moss::repository; +pub use moss::{repository::Priority, Repository}; +use serde::{Deserialize, Serialize}; +use thiserror::Error; + +use crate::Env; + +/// A unique [`Profile`] identifier +#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)] +pub struct Id(String); + +impl Id { + pub fn new(identifier: String) -> Self { + Self( + identifier + .chars() + .map(|c| if c.is_alphanumeric() { c } else { '_' }) + .collect(), + ) + } +} + +impl fmt::Display for Id { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) + } +} + +impl From for Id { + fn from(value: String) -> Self { + Self::new(value) + } +} + +/// Profile configuration data +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Profile { + pub collections: repository::Map, +} + +/// A map of profiles +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +pub struct Map(HashMap); + +impl Map { + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } + + pub fn with(items: impl IntoIterator) -> Self { + Self(items.into_iter().collect()) + } + + pub fn get(&self, id: &Id) -> Option<&Profile> { + self.0.get(id) + } + + pub fn add(&mut self, id: Id, profile: Profile) { + self.0.insert(id, profile); + } + + pub fn iter(&self) -> impl Iterator { + self.0.iter() + } +} + +impl IntoIterator for Map { + type Item = (Id, Profile); + type IntoIter = std::collections::hash_map::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } +} + +impl Config for Map { + fn domain() -> String { + "profile".into() + } + + fn merge(self, other: Self) -> Self { + Self(self.0.into_iter().chain(other.0).collect()) + } +} + +pub struct Manager<'a> { + pub profiles: Map, + env: &'a Env, +} + +impl<'a> Manager<'a> { + pub async fn new(env: &'a Env) -> Manager<'a> { + let profiles = env.config.load::().await.unwrap_or_default(); + + Self { env, profiles } + } + + pub fn repositories(&self, profile: &Id) -> Result<&repository::Map, Error> { + self.profiles + 
.get(profile) + .map(|profile| &profile.collections) + .ok_or(Error::MissingProfile) + } + + pub async fn save_profile(&mut self, id: Id, profile: Profile) -> Result<(), Error> { + // Save config + let map = Map::with([(id.clone(), profile.clone())]); + self.env.config.save(id.clone(), &map).await?; + + // Add to profile map + self.profiles.add(id, profile); + + Ok(()) + } +} + +#[derive(Debug, Error)] +pub enum Error { + #[error("cannot find the provided profile")] + MissingProfile, + #[error("save profiles")] + SaveProfile(#[from] config::SaveError), +} diff --git a/crates/boulder/src/recipe.rs b/crates/boulder/src/recipe.rs new file mode 100644 index 00000000..9a7bae01 --- /dev/null +++ b/crates/boulder/src/recipe.rs @@ -0,0 +1,65 @@ +// SPDX-FileCopyrightText: Copyright © 2020-2023 Serpent OS Developers +// +// SPDX-License-Identifier: MPL-2.0 + +use stone_recipe::Recipe; + +use crate::architecture::{self, BuildTarget}; + +pub fn build_targets(recipe: &Recipe) -> Vec { + let host = architecture::host(); + let host_string = host.to_string(); + + if recipe.architectures.is_empty() { + let mut targets = vec![BuildTarget::Native(host)]; + + if recipe.emul32 { + targets.push(BuildTarget::Emul32(host)); + } + + targets + } else { + let mut targets = vec![]; + + if recipe.architectures.contains(&host_string) + || recipe.architectures.contains(&"native".into()) + { + targets.push(BuildTarget::Native(host)); + } + + let emul32 = BuildTarget::Emul32(host); + let emul32_string = emul32.to_string(); + + if recipe.architectures.contains(&emul32_string) + || recipe.architectures.contains(&"emul32".into()) + { + targets.push(emul32); + } + + targets + } +} + +pub fn build_target_definition(recipe: &Recipe, target: BuildTarget) -> &stone_recipe::Build { + let mut build = &recipe.build; + + let target_string = target.to_string(); + + if let Some(profile) = recipe + .profiles + .iter() + .find(|profile| profile.key == target_string) + { + build = &profile.value; + } else if 
target.emul32() { + if let Some(profile) = recipe + .profiles + .iter() + .find(|profile| &profile.key == "emul32") + { + build = &profile.value; + } + } + + build +} diff --git a/crates/boulder/src/root.rs b/crates/boulder/src/root.rs new file mode 100644 index 00000000..67c67e33 --- /dev/null +++ b/crates/boulder/src/root.rs @@ -0,0 +1,67 @@ +// SPDX-FileCopyrightText: Copyright © 2020-2023 Serpent OS Developers +// +// SPDX-License-Identifier: MPL-2.0 + +use std::io; + +use moss::repository; +use thiserror::Error; + +use crate::{container, dependency, util, Builder}; + +pub async fn populate(builder: &Builder, repositories: repository::Map) -> Result<(), Error> { + let packages = dependency::calculate(builder); + + let rootfs = builder.paths.rootfs().host; + + // Recreate root + util::recreate_dir(&rootfs).await?; + + let mut moss_client = moss::Client::new("boulder", &builder.env.moss_dir) + .await? + .explicit_repositories(repositories) + .await? + .ephemeral(&rootfs)?; + + moss_client.install(&packages, true).await?; + + Ok(()) +} + +pub fn clean(builder: &Builder) -> Result<(), Error> { + // Dont't need to clean if it doesn't exist + if !builder.paths.build().host.exists() { + return Ok(()); + } + + // We recreate inside the container so we don't + // get permissions error if this is a rootless build + // and there's subuid mappings into the user namespace + container::exec(&builder.paths, false, || { + // Recreate `install` dir + util::sync::recreate_dir(&builder.paths.install().guest)?; + + for target in &builder.targets { + for job in &target.jobs { + // Recerate build dir + util::sync::recreate_dir(&job.build_dir)?; + } + } + + Ok(()) + })?; + + Ok(()) +} + +#[derive(Debug, Error)] +pub enum Error { + #[error("io")] + Io(#[from] io::Error), + #[error("moss client")] + MossClient(#[from] moss::client::Error), + #[error("moss install")] + MossInstall(#[from] moss::client::install::Error), + #[error("container")] + Container(#[from] container::Error), +} 
diff --git a/crates/boulder/src/runtime.rs b/crates/boulder/src/runtime.rs new file mode 100644 index 00000000..c20be7a7 --- /dev/null +++ b/crates/boulder/src/runtime.rs @@ -0,0 +1,38 @@ +// SPDX-FileCopyrightText: Copyright © 2020-2023 Serpent OS Developers +// +// SPDX-License-Identifier: MPL-2.0 + +use std::{future::Future, io, thread, time::Duration}; + +use tokio::runtime; + +pub struct Runtime(runtime::Runtime); + +impl Runtime { + pub fn new() -> io::Result { + Ok(Self( + runtime::Builder::new_multi_thread().enable_all().build()?, + )) + } + + pub fn block_on(&self, task: F) -> T + where + F: Future, + { + self.0.block_on(task) + } + + pub fn destroy(self) { + drop(self); + // We want to ensure no threads exist before + // cloning into container. Sometimes a deadlock + // occurs which appears related to a race condition + // from some thread artifacts still existing. Adding + // this delay allows things to get cleaned up. + // NOTE: This appears to reliably fix the problem, + // I ran boulder 100 times w/ and w/out this delay + // and the deadlock never occured w/ it, but w/out + // it occured within 10 attempts. 
+ thread::sleep(Duration::from_millis(50)); + } +} diff --git a/crates/boulder/src/upstream.rs b/crates/boulder/src/upstream.rs new file mode 100644 index 00000000..a2c2d451 --- /dev/null +++ b/crates/boulder/src/upstream.rs @@ -0,0 +1,455 @@ +// SPDX-FileCopyrightText: Copyright © 2020-2023 Serpent OS Developers +// +// SPDX-License-Identifier: MPL-2.0 + +use std::{ + io, + path::{Path, PathBuf}, + str::FromStr, + time::Duration, +}; + +use futures::{stream, StreamExt, TryStreamExt}; +use nix::unistd::{linkat, LinkatFlags}; +use sha2::{Digest, Sha256}; +use stone_recipe::Recipe; +use thiserror::Error; +use tokio::fs::{copy, remove_dir_all}; +use tokio::io::AsyncWriteExt; +use tokio::process::Command; +use tui::{MultiProgress, ProgressBar, ProgressStyle, Stylize}; +use url::Url; + +use crate::{util, Paths}; + +/// Cache all upstreams from the provided [`Recipe`] and make them available +/// in the guest rootfs. +pub async fn sync(recipe: &Recipe, paths: &Paths) -> Result<(), Error> { + let upstreams = recipe + .upstreams + .iter() + .cloned() + .map(Upstream::from_recipe) + .collect::, _>>()?; + + println!(); + println!( + "Sharing {} upstream(s) with the build container", + upstreams.len() + ); + println!(); + + let mp = MultiProgress::new(); + let tp = mp.add( + ProgressBar::new(upstreams.len() as u64).with_style( + ProgressStyle::with_template("\n|{bar:20.cyan/blue}| {pos}/{len}") + .unwrap() + .progress_chars("■≡=- "), + ), + ); + tp.tick(); + + let upstream_dir = paths.guest_host_path(&paths.upstreams()); + util::ensure_dir_exists(&upstream_dir).await?; + + stream::iter(&upstreams) + .map(|upstream| async { + let pb = mp.insert_before( + &tp, + ProgressBar::new(u64::MAX).with_message(format!( + "{} {}", + "Downloading".blue(), + upstream.name().bold(), + )), + ); + pb.enable_steady_tick(Duration::from_millis(150)); + + let install = upstream.fetch(paths, &pb).await?; + + pb.set_message(format!("{} {}", "Copying".yellow(), upstream.name().bold(),)); + 
pb.set_style( + ProgressStyle::with_template(" {spinner} {wide_msg} ") + .unwrap() + .tick_chars("--=≡■≡=--"), + ); + + install.share(&upstream_dir).await?; + + let cached_tag = install + .was_cached() + .then_some(format!("{}", " (cached)".dim())) + .unwrap_or_default(); + + pb.finish(); + mp.remove(&pb); + mp.println(format!( + "{} {}{}", + "Shared".green(), + upstream.name().bold(), + cached_tag, + ))?; + tp.inc(1); + + Ok(()) as Result<_, Error> + }) + .buffer_unordered(moss::environment::MAX_NETWORK_CONCURRENCY) + .try_collect::<()>() + .await?; + + mp.clear()?; + println!(); + + Ok(()) +} + +enum Installed { + Plain { + name: String, + path: PathBuf, + was_cached: bool, + }, + Git { + name: String, + path: PathBuf, + was_cached: bool, + }, +} + +impl Installed { + fn was_cached(&self) -> bool { + match self { + Installed::Plain { was_cached, .. } => *was_cached, + Installed::Git { was_cached, .. } => *was_cached, + } + } + + async fn share(&self, dest_dir: &Path) -> Result<(), Error> { + match self { + Installed::Plain { name, path, .. } => { + let target = dest_dir.join(name); + + // Attempt hard link + let link_result = linkat(None, path, None, &target, LinkatFlags::NoSymlinkFollow); + + // Copy instead + if link_result.is_err() { + copy(&path, &target).await?; + } + } + Installed::Git { name, path, .. } => { + let target = dest_dir.join(name); + util::copy_dir(path, &target).await?; + } + } + + Ok(()) + } +} + +#[derive(Debug, Clone)] +pub enum Upstream { + Plain(Plain), + Git(Git), +} + +impl Upstream { + pub fn from_recipe(upstream: stone_recipe::Upstream) -> Result { + match upstream { + stone_recipe::Upstream::Plain { + uri, hash, rename, .. + } => Ok(Self::Plain(Plain { + uri, + hash: hash.parse()?, + rename, + })), + stone_recipe::Upstream::Git { + uri, + ref_id, + staging, + .. 
+ } => Ok(Self::Git(Git { + uri, + ref_id, + staging, + })), + } + } + + fn name(&self) -> &str { + match self { + Upstream::Plain(plain) => plain.name(), + Upstream::Git(git) => git.name(), + } + } + + async fn fetch(&self, paths: &Paths, pb: &ProgressBar) -> Result { + match self { + Upstream::Plain(plain) => plain.fetch(paths, pb).await, + Upstream::Git(git) => git.fetch(paths, pb).await, + } + } +} + +#[derive(Debug, Clone)] +pub struct Hash(String); + +impl FromStr for Hash { + type Err = ParseHashError; + + fn from_str(s: &str) -> Result { + if s.len() < 5 { + return Err(ParseHashError::TooShort(s.to_string())); + } + + Ok(Self(s.to_string())) + } +} + +#[derive(Debug, Error)] +pub enum ParseHashError { + #[error("hash too short: {0}")] + TooShort(String), +} + +#[derive(Debug, Clone)] +pub struct Plain { + uri: Url, + hash: Hash, + rename: Option, +} + +impl Plain { + fn name(&self) -> &str { + if let Some(name) = &self.rename { + name + } else { + util::uri_file_name(&self.uri) + } + } + + async fn path(&self, paths: &Paths) -> PathBuf { + // Type safe guaranteed to be >= 5 bytes + let hash = &self.hash.0; + + let parent = paths + .upstreams() + .host + .join("fetched") + .join(&hash[..5]) + .join(&hash[hash.len() - 5..]); + + let _ = util::ensure_dir_exists(&parent).await; + + parent.join(hash) + } + + async fn fetch(&self, paths: &Paths, pb: &ProgressBar) -> Result { + use moss::request; + use tokio::fs; + + pb.set_style( + ProgressStyle::with_template(" {spinner} {wide_msg} {binary_bytes_per_sec:>.dim} ") + .unwrap() + .tick_chars("--=≡■≡=--"), + ); + + let name = self.name(); + let path = self.path(paths).await; + + if path.exists() { + return Ok(Installed::Plain { + name: name.to_string(), + path, + was_cached: true, + }); + } + + let mut stream = request::get(self.uri.clone()).await?; + + let mut hasher = Sha256::new(); + let mut out = fs::File::create(&path).await?; + + while let Some(chunk) = stream.next().await { + let bytes = &chunk?; + 
pb.inc(bytes.len() as u64); + hasher.update(bytes); + out.write_all(bytes).await?; + } + + out.flush().await?; + + let hash = hex::encode(hasher.finalize()); + + if hash != self.hash.0 { + fs::remove_file(&path).await?; + + return Err(Error::HashMismatch { + name: name.to_string(), + expected: self.hash.0.clone(), + got: hash, + }); + } + + Ok(Installed::Plain { + name: name.to_string(), + path, + was_cached: false, + }) + } +} + +#[derive(Debug, Clone)] +pub struct Git { + uri: Url, + ref_id: String, + staging: bool, +} + +impl Git { + fn name(&self) -> &str { + util::uri_file_name(&self.uri) + } + + async fn final_path(&self, paths: &Paths) -> PathBuf { + let parent = paths.upstreams().host.join("git"); + + let _ = util::ensure_dir_exists(&parent).await; + + parent.join(util::uri_relative_path(&self.uri)) + } + + async fn staging_path(&self, paths: &Paths) -> PathBuf { + let parent = paths.upstreams().host.join("staging").join("git"); + + let _ = util::ensure_dir_exists(&parent).await; + + parent.join(util::uri_relative_path(&self.uri)) + } + + async fn fetch(&self, paths: &Paths, pb: &ProgressBar) -> Result { + pb.set_style( + ProgressStyle::with_template(" {spinner} {wide_msg} ") + .unwrap() + .tick_chars("--=≡■≡=--"), + ); + + let clone_path = if self.staging { + self.staging_path(paths).await + } else { + self.final_path(paths).await + }; + let clone_path_string = clone_path.display().to_string(); + + let final_path = self.final_path(paths).await; + let final_path_string = final_path.display().to_string(); + + if self.ref_exists(&final_path).await? 
{ + self.reset_to_ref(&final_path).await?; + return Ok(Installed::Git { + name: self.name().to_string(), + path: final_path, + was_cached: true, + }); + } + + let _ = remove_dir_all(&clone_path).await; + if self.staging { + let _ = remove_dir_all(&final_path).await; + } + + let mut args = vec!["clone"]; + if self.staging { + args.push("--mirror"); + } + args.extend(["--", self.uri.as_str(), &clone_path_string]); + + self.run(&args, None).await?; + + if self.staging { + self.run( + &["clone", "--", &clone_path_string, &final_path_string], + None, + ) + .await?; + } + + self.reset_to_ref(&final_path).await?; + + Ok(Installed::Git { + name: self.name().to_string(), + path: final_path, + was_cached: false, + }) + } + + async fn ref_exists(&self, path: &Path) -> Result { + if !path.exists() { + return Ok(false); + } + + self.run(&["fetch"], Some(path)).await?; + + let result = self + .run(&["cat-file", "-e", &self.ref_id], Some(path)) + .await; + + Ok(result.is_ok()) + } + + async fn reset_to_ref(&self, path: &Path) -> Result<(), Error> { + self.run(&["reset", "--hard", &self.ref_id], Some(path)) + .await?; + + self.run( + &[ + "submodule", + "update", + "--init", + "--recursive", + "--depth", + "1", + "--jobs", + "4", + ], + Some(path), + ) + .await?; + + Ok(()) + } + + async fn run(&self, args: &[&str], cwd: Option<&Path>) -> Result<(), Error> { + let mut command = Command::new("git"); + + if let Some(dir) = cwd { + command.current_dir(dir); + } + + let output = command.args(args).output().await?; + + if !output.status.success() { + eprint!("{}", String::from_utf8_lossy(&output.stderr)); + return Err(Error::GitFailed(self.uri.clone())); + } + + Ok(()) + } +} + +#[derive(Debug, Error)] +pub enum Error { + #[error("failed to clone {0}")] + GitFailed(Url), + #[error("parse hash")] + ParseHash(#[from] ParseHashError), + #[error("hash mismatch for {name}, expected {expected:?} got {got:?}")] + HashMismatch { + name: String, + expected: String, + got: String, + }, + 
#[error("request")] + Request(#[from] moss::request::Error), + #[error("io")] + Io(#[from] io::Error), +} diff --git a/crates/boulder/src/util.rs b/crates/boulder/src/util.rs new file mode 100644 index 00000000..0e371adf --- /dev/null +++ b/crates/boulder/src/util.rs @@ -0,0 +1,147 @@ +// SPDX-FileCopyrightText: Copyright © 2020-2023 Serpent OS Developers +// +// SPDX-License-Identifier: MPL-2.0 + +use std::{ + io, + num::NonZeroUsize, + path::{Path, PathBuf}, + thread, +}; + +use futures::{future::BoxFuture, FutureExt}; +use tokio::fs::{copy, create_dir_all, read_dir, read_link, remove_dir_all, symlink}; +use url::Url; + +pub async fn ensure_dir_exists(path: &Path) -> Result<(), io::Error> { + if !path.exists() { + create_dir_all(path).await?; + } + Ok(()) +} + +pub async fn recreate_dir(path: &Path) -> Result<(), io::Error> { + if path.exists() { + remove_dir_all(path).await?; + } + create_dir_all(path).await?; + Ok(()) +} + +pub fn copy_dir<'a>( + source_dir: &'a Path, + out_dir: &'a Path, +) -> BoxFuture<'a, Result<(), io::Error>> { + async move { + recreate_dir(out_dir).await?; + + let mut contents = read_dir(&source_dir).await?; + + while let Some(entry) = contents.next_entry().await? { + let path = entry.path(); + + if let Some(file_name) = path.file_name() { + let dest = out_dir.join(file_name); + let meta = entry.metadata().await?; + + if meta.is_dir() { + copy_dir(&path, &dest).await?; + } else if meta.is_file() { + copy(&path, &dest).await?; + } else if meta.is_symlink() { + symlink(read_link(&path).await?, &dest).await?; + } + } + } + + Ok(()) + } + .boxed() +} + +pub async fn list_dirs(dir: &Path) -> Result, io::Error> { + let mut read_dir = read_dir(dir).await?; + + let mut paths = vec![]; + + while let Some(entry) = read_dir.next_entry().await? 
{ + let path = entry.path(); + let meta = entry.metadata().await?; + + if meta.is_dir() { + paths.push(path); + } + } + + Ok(paths) +} + +pub fn uri_file_name(uri: &Url) -> &str { + let path = uri.path(); + + path.rsplit('/').next().unwrap_or_default() +} + +pub fn uri_relative_path(uri: &Url) -> &str { + let path = uri.path(); + + path.strip_prefix('/').unwrap_or_default() +} + +pub fn num_cpus() -> NonZeroUsize { + thread::available_parallelism().unwrap_or_else(|_| NonZeroUsize::new(1).unwrap()) +} + +pub fn is_root() -> bool { + use nix::unistd::Uid; + + Uid::effective().is_root() +} + +pub mod sync { + use std::{ + fs::{create_dir_all, remove_dir_all}, + io, + path::{Path, PathBuf}, + }; + + pub fn ensure_dir_exists(path: &Path) -> Result<(), io::Error> { + if !path.exists() { + create_dir_all(path)?; + } + Ok(()) + } + + pub fn recreate_dir(path: &Path) -> Result<(), io::Error> { + if path.exists() { + remove_dir_all(path)?; + } + create_dir_all(path)?; + Ok(()) + } + + pub fn enumerate_files<'a>( + dir: &'a Path, + matcher: impl Fn(&Path) -> bool + Send + Copy + 'a, + ) -> Result, io::Error> { + use std::fs::read_dir; + + let read_dir = read_dir(dir)?; + + let mut paths = vec![]; + + for entry in read_dir { + let entry = entry?; + let path = entry.path(); + let meta = entry.metadata()?; + + if meta.is_dir() { + paths.extend(enumerate_files(&path, matcher)?); + } else if meta.is_file() && matcher(&path) { + paths.push(path); + } + } + + Ok(paths) + } +} diff --git a/crates/config/Cargo.toml b/crates/config/Cargo.toml new file mode 100644 index 00000000..0114487b --- /dev/null +++ b/crates/config/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "config" +version = "0.1.0" +edition.workspace = true + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +dirs.workspace = true +futures.workspace = true +serde.workspace = true +serde_yaml.workspace = true +thiserror.workspace = true 
+tokio-stream.workspace = true +tokio.workspace = true diff --git a/crates/config/src/lib.rs b/crates/config/src/lib.rs new file mode 100644 index 00000000..a31af041 --- /dev/null +++ b/crates/config/src/lib.rs @@ -0,0 +1,283 @@ +// SPDX-FileCopyrightText: Copyright © 2020-2023 Serpent OS Developers +// +// SPDX-License-Identifier: MPL-2.0 + +use std::{ + fmt, + path::{Path, PathBuf}, +}; + +use futures::StreamExt; +use serde::{de::DeserializeOwned, Serialize}; +use thiserror::Error; +use tokio::{fs, io}; +use tokio_stream::wrappers::ReadDirStream; + +const EXTENSION: &str = "yaml"; + +pub trait Config: DeserializeOwned { + fn domain() -> String; + + fn merge(self, other: Self) -> Self; +} + +#[derive(Debug, Clone)] +pub struct Manager { + scope: Scope, +} + +impl Manager { + /// Config is loaded / merged from `usr/share` & `etc` relative to `root` + /// and saved to `etc/{program}/{domain}.d/{name}.yaml + pub fn system(root: impl Into, program: impl ToString) -> Self { + Self { + scope: Scope::System { + root: root.into(), + program: program.to_string(), + }, + } + } + + /// Config is loaded from $XDG_CONFIG_HOME and saved to + /// $XDG_CONFIG_HOME/{program}/{domain}.d/{name}.yaml + pub fn user(program: impl ToString) -> Result { + Ok(Self { + scope: Scope::User { + config: dirs::config_dir().ok_or(CreateUserError)?, + program: program.to_string(), + }, + }) + } + + /// Config is loaded from `path` and saved to + /// `path`/{domain}.d/{name}.yaml + pub fn custom(path: impl Into) -> Self { + Self { + scope: Scope::Custom(path.into()), + } + } + + pub async fn load(&self) -> Option { + let domain = T::domain(); + + let mut configs = vec![]; + + for (entry, resolve) in self.scope.load_with() { + for path in enumerate_paths(entry, resolve, &domain).await { + if let Some(config) = read_config(path).await { + configs.push(config); + } + } + } + + configs.into_iter().reduce(T::merge) + } + + pub async fn save( + &self, + name: impl fmt::Display, + config: &T, + ) -> 
Result<(), SaveError> { + let domain = T::domain(); + + let dir = self.scope.save_dir(&domain); + + fs::create_dir_all(&dir) + .await + .map_err(|io| SaveError::CreateDir(dir.clone(), io))?; + + let path = dir.join(format!("{name}.{EXTENSION}")); + + let serialized = serde_yaml::to_string(config)?; + + fs::write(&path, serialized) + .await + .map_err(|io| SaveError::Write(path, io))?; + + Ok(()) + } +} + +#[derive(Debug, Error)] +#[error("$HOME or $XDG_CONFIG_HOME env not set")] +pub struct CreateUserError; + +#[derive(Debug, Error)] +pub enum SaveError { + #[error("create config dir {0:?}")] + CreateDir(PathBuf, #[source] io::Error), + #[error("serialize config")] + Yaml(#[from] serde_yaml::Error), + #[error("write config file {0:?}")] + Write(PathBuf, #[source] io::Error), +} + +async fn enumerate_paths(entry: Entry, resolve: Resolve<'_>, domain: &str) -> Vec { + match entry { + Entry::File => { + let file = resolve.file(domain); + + if file.exists() { + vec![file] + } else { + vec![] + } + } + Entry::Directory => { + if let Ok(read_dir) = fs::read_dir(resolve.dir(domain)).await { + ReadDirStream::new(read_dir) + .filter_map(|entry| async { + let entry = entry.ok()?; + let path = entry.path(); + let extension = path + .extension() + .and_then(|ext| ext.to_str()) + .unwrap_or_default(); + + if path.exists() && extension == EXTENSION { + Some(path) + } else { + None + } + }) + .collect() + .await + } else { + vec![] + } + } + } +} + +async fn read_config(path: PathBuf) -> Option { + let bytes = fs::read(path).await.ok()?; + serde_yaml::from_slice(&bytes).ok() +} + +#[derive(Debug, Clone)] +enum Scope { + System { program: String, root: PathBuf }, + User { program: String, config: PathBuf }, + Custom(PathBuf), +} + +impl Scope { + fn save_dir<'a>(&'a self, domain: &'a str) -> PathBuf { + match &self { + Scope::System { root, program } => Resolve::System { + root, + base: SystemBase::Admin, + program, + }, + Scope::User { config, program } => Resolve::User { config, 
program }, + Scope::Custom(dir) => Resolve::Custom(dir), + } + .dir(domain) + } + + fn load_with(&self) -> Vec<(Entry, Resolve)> { + match &self { + // System we search / merge all base file / .d files + // from vendor then admin + Scope::System { root, program } => vec![ + ( + Entry::File, + Resolve::System { + root, + base: SystemBase::Vendor, + program, + }, + ), + ( + Entry::Directory, + Resolve::System { + root, + base: SystemBase::Vendor, + program, + }, + ), + ( + Entry::File, + Resolve::System { + root, + base: SystemBase::Admin, + program, + }, + ), + ( + Entry::Directory, + Resolve::System { + root, + base: SystemBase::Admin, + program, + }, + ), + ], + Scope::User { config, program } => { + vec![ + (Entry::File, Resolve::User { config, program }), + (Entry::Directory, Resolve::User { config, program }), + ] + } + Scope::Custom(root) => vec![ + (Entry::File, Resolve::Custom(root)), + (Entry::Directory, Resolve::Custom(root)), + ], + } + } +} + +#[derive(Clone, Copy)] +enum SystemBase { + Admin, + Vendor, +} + +impl SystemBase { + fn path(&self) -> &'static str { + match self { + SystemBase::Admin => "etc", + SystemBase::Vendor => "usr/share", + } + } +} + +enum Entry { + File, + Directory, +} + +enum Resolve<'a> { + System { + root: &'a Path, + base: SystemBase, + program: &'a str, + }, + User { + config: &'a Path, + program: &'a str, + }, + Custom(&'a Path), +} + +impl<'a> Resolve<'a> { + fn config_dir(&self) -> PathBuf { + match self { + Resolve::System { + root, + base, + program, + } => root.join(base.path()).join(program), + Resolve::User { config, program } => config.join(program), + Resolve::Custom(dir) => dir.to_path_buf(), + } + } + + fn file(&self, domain: &str) -> PathBuf { + self.config_dir().join(format!("{domain}.{EXTENSION}")) + } + + fn dir(&self, domain: &str) -> PathBuf { + self.config_dir().join(format!("{domain}.d")) + } +} diff --git a/crates/container/Cargo.toml b/crates/container/Cargo.toml index 62cf3de4..00ee6c48 100644 --- 
a/crates/container/Cargo.toml +++ b/crates/container/Cargo.toml @@ -7,3 +7,4 @@ edition.workspace = true [dependencies] nix.workspace = true +thiserror.workspace = true diff --git a/crates/container/src/idmap.rs b/crates/container/src/idmap.rs new file mode 100644 index 00000000..559e3c66 --- /dev/null +++ b/crates/container/src/idmap.rs @@ -0,0 +1,153 @@ +// SPDX-FileCopyrightText: Copyright © 2020-2023 Serpent OS Developers +// +// SPDX-License-Identifier: MPL-2.0 + +use std::{fmt, fs, process::Command}; + +use nix::unistd::{getgid, getuid, Pid, User}; +use thiserror::Error; + +pub fn idmap(pid: Pid) -> Result<(), Error> { + let uid = getuid(); + let gid = getgid(); + let username = User::from_uid(uid)? + .map(|user| user.name) + .unwrap_or_default(); + + let subuid_mappings = load_sub_mappings(Kind::User, uid.as_raw(), &username)?; + let subgid_mappings = load_sub_mappings(Kind::Group, gid.as_raw(), &username)?; + + let uid_mappings = format_id_mappings(&subuid_mappings); + let gid_mappings = format_id_mappings(&subgid_mappings); + + add_id_mappings(pid, Kind::User, uid.as_raw(), &uid_mappings)?; + add_id_mappings(pid, Kind::Group, gid.as_raw(), &gid_mappings)?; + + Ok(()) +} + +#[derive(Debug, Clone, Copy)] +pub enum Kind { + User, + Group, +} + +impl fmt::Display for Kind { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Kind::User => "uid", + Kind::Group => "gid", + } + .fmt(f) + } +} + +fn load_sub_mappings(kind: Kind, id: u32, username: &str) -> Result, Error> { + let Ok(content) = fs::read_to_string(format!("/etc/sub{kind}")) else { + ensure_sub_count(kind, id, &[])?; + return Ok(vec![]); + }; + + let mut mappings = vec![]; + + let lines = content.lines(); + + for line in lines { + let mut split = line.split(':'); + + let user = split.next(); + let sub_id = split.next().and_then(|s| s.parse::().ok()); + let count = split.next().and_then(|s| s.parse::().ok()); + + if let (Some(user), Some(sub_id), Some(count)) = (user, sub_id, 
count) { + if user.parse::() == Ok(id) || user == username { + mappings.push(Submap { sub_id, count }); + } + } + } + + ensure_sub_count(kind, id, &mappings)?; + + Ok(mappings) +} + +fn ensure_sub_count(kind: Kind, id: u32, mappings: &[Submap]) -> Result<(), Error> { + let count = mappings.iter().map(|map| map.count).sum::(); + + if count < 1000 { + Err(Error::SubMappingCount(id, kind, count)) + } else { + Ok(()) + } +} + +fn format_id_mappings(sub_mappings: &[Submap]) -> Vec { + // Start mapping at 1 (root mapped to user) + let mut ns_id = 1; + + let mut id_mappings = vec![]; + + for submap in sub_mappings { + id_mappings.push(Idmap { + ns_id, + host_id: submap.sub_id, + count: submap.count, + }); + + ns_id += submap.count; + } + + id_mappings +} + +fn add_id_mappings(pid: Pid, kind: Kind, id: u32, mappings: &[Idmap]) -> Result<(), Error> { + let cmd = match kind { + Kind::User => "newuidmap", + Kind::Group => "newgidmap", + }; + let out = Command::new(cmd) + .arg(pid.as_raw().to_string()) + // Root mapping + .arg(0.to_string()) + .arg(id.to_string()) + .arg(1.to_string()) + // Sub mappings + .args(mappings.iter().flat_map(|mapping| { + [ + mapping.ns_id.to_string(), + mapping.host_id.to_string(), + mapping.count.to_string(), + ] + })) + .output() + .map_err(|e| Error::Command(e.to_string(), kind))?; + + if !out.status.success() { + return Err(Error::Command(format!("{}", out.status), kind)); + } + + Ok(()) +} + +#[derive(Debug)] +struct Submap { + sub_id: u32, + count: u32, +} + +#[derive(Debug)] +struct Idmap { + ns_id: u32, + host_id: u32, + count: u32, +} + +#[derive(Debug, Error)] +pub enum Error { + #[error("\n\nAt least 1,000 sub{1} mappings are required for {1} {0}, found {2}\n\nMappings can be added to /etc/sub{1}")] + SubMappingCount(u32, Kind, u32), + #[error("new{1}map command failed: {0}")] + Command(String, Kind), + #[error("nix")] + Nix(#[from] nix::Error), +} diff --git a/crates/container/src/lib.rs b/crates/container/src/lib.rs index 
6adc2a32..7c3a9107 100644 --- a/crates/container/src/lib.rs +++ b/crates/container/src/lib.rs @@ -1,84 +1,241 @@ +// SPDX-FileCopyrightText: Copyright © 2020-2023 Serpent OS Developers +// +// SPDX-License-Identifier: MPL-2.0 use std::env::set_current_dir; -use std::fs::{copy, create_dir, remove_dir, write}; -use std::path::Path; +use std::fs::{self, copy, create_dir_all, remove_dir}; +use std::io; +use std::path::{Path, PathBuf}; +use std::sync::atomic::{AtomicI32, Ordering}; use nix::libc::SIGCHLD; use nix::mount::{mount, umount2, MntFlags, MsFlags}; use nix::sched::{clone, CloneFlags}; -use nix::sys::wait::waitpid; -use nix::unistd::{close, getgid, getuid, pipe, pivot_root, read, sethostname}; +use nix::sys::prctl::set_pdeathsig; +use nix::sys::signal::{kill, sigaction, SaFlags, SigAction, SigHandler, Signal}; +use nix::sys::signalfd::SigSet; +use nix::sys::stat::{umask, Mode}; +use nix::sys::wait::{waitpid, WaitStatus}; +use nix::unistd::{close, pipe, pivot_root, read, sethostname, write, Pid, Uid}; +use thiserror::Error; -type Error = Box; +use self::idmap::idmap; -pub fn run(root: impl AsRef, mut f: impl FnMut() -> Result<(), Error>) -> Result<(), Error> { - static mut STACK: [u8; 4 * 1024 * 1024] = [0u8; 4 * 1024 * 1024]; +mod idmap; - let root = root.as_ref(); +pub struct Container { + root: PathBuf, + work_dir: Option, + binds: Vec, + networking: bool, + hostname: Option, + ignore_host_sigint: bool, +} + +impl Container { + pub fn new(root: impl Into) -> Self { + Self { + root: root.into(), + work_dir: None, + binds: vec![], + networking: false, + hostname: None, + ignore_host_sigint: false, + } + } + + pub fn work_dir(self, work_dir: impl Into) -> Self { + Self { + work_dir: Some(work_dir.into()), + ..self + } + } + + pub fn bind_rw(mut self, host: impl Into, guest: impl Into) -> Self { + self.binds.push(Bind { + source: host.into(), + target: guest.into(), + read_only: false, + }); + self + } + + pub fn bind_ro(mut self, host: impl Into, guest: impl 
Into) -> Self { + self.binds.push(Bind { + source: host.into(), + target: guest.into(), + read_only: true, + }); + self + } + + pub fn networking(self, enabled: bool) -> Self { + Self { + networking: enabled, + ..self + } + } + + pub fn hostname(self, hostname: impl ToString) -> Self { + Self { + hostname: Some(hostname.to_string()), + ..self + } + } + + /// Ignore `SIGINT` from the parent process. This allows it to be forwarded to a + /// spawned process inside the container by using [`forward_sigint`]. + pub fn ignore_host_sigint(self, ignore: bool) -> Self { + Self { + ignore_host_sigint: ignore, + ..self + } + } + + pub fn run(self, mut f: impl FnMut() -> Result<(), E>) -> Result<(), Error> + where + E: std::error::Error + 'static, + { + static mut STACK: [u8; 4 * 1024 * 1024] = [0u8; 4 * 1024 * 1024]; + + let rootless = !Uid::effective().is_root(); - // Pipe to synchronize parent & child - let sync = pipe()?; + // Pipe to synchronize parent & child + let sync = pipe()?; - let pid = unsafe { - clone( - Box::new(|| match enter(root, sync, &mut f) { - Ok(_) => 0, - Err(e) => { - eprintln!("Error: {e}"); - 1 + let mut flags = CloneFlags::CLONE_NEWNS + | CloneFlags::CLONE_NEWPID + | CloneFlags::CLONE_NEWIPC + | CloneFlags::CLONE_NEWUTS; + + if rootless { + flags |= CloneFlags::CLONE_NEWUSER; + } + + if !self.networking { + flags |= CloneFlags::CLONE_NEWNET; + } + + let pid = unsafe { + clone( + Box::new(|| match enter(&self, sync, &mut f) { + Ok(_) => 0, + // Write error back to parent process + Err(error) => { + let error = error.to_string(); + let mut pos = 0; + + while pos < error.len() { + let Ok(len) = write(sync.1, &error.as_bytes()[pos..]) else { + break; + }; + + pos += len; + } + + let _ = close(sync.1); + + 1 + } + }), + &mut STACK, + flags, + Some(SIGCHLD), + )? 
+ }; + + // Update uid / gid map to map current user to root in container + if rootless { + idmap(pid)?; + } + + // Allow child to continue + write(sync.1, &[Message::Continue as u8])?; + // Write no longer needed + close(sync.1)?; + + if self.ignore_host_sigint { + ignore_sigint()?; + } + + let status = waitpid(pid, None)?; + + if self.ignore_host_sigint { + default_sigint()?; + } + + match status { + WaitStatus::Exited(_, 0) => Ok(()), + WaitStatus::Exited(_, _) => { + let mut error = String::new(); + let mut buffer = [0u8; 1024]; + + loop { + let len = read(sync.0, &mut buffer)?; + + if len == 0 { + break; + } + + error.push_str(String::from_utf8_lossy(&buffer[..len]).as_ref()); } - }), - &mut STACK, - CloneFlags::CLONE_NEWNS - | CloneFlags::CLONE_NEWPID - | CloneFlags::CLONE_NEWIPC - | CloneFlags::CLONE_NEWUTS - | CloneFlags::CLONE_NEWUSER, - Some(SIGCHLD), - )? - }; - - // Update uid / gid map to map current user to root in container - write(format!("/proc/{pid}/setgroups"), "deny")?; - write(format!("/proc/{pid}/uid_map"), format!("0 {} 1", getuid()))?; - write(format!("/proc/{pid}/gid_map"), format!("0 {} 1", getgid()))?; - - // Allow child to continue - close(sync.1)?; - - waitpid(pid, None)?; - Ok(()) + Err(Error::Failure(error)) + } + WaitStatus::Signaled(_, signal, _) => Err(Error::Signaled(signal)), + WaitStatus::Stopped(_, _) + | WaitStatus::PtraceEvent(_, _, _) + | WaitStatus::PtraceSyscall(_) + | WaitStatus::Continued(_) + | WaitStatus::StillAlive => Err(Error::UnknownExit), + } + } } -fn enter( - root: &Path, +fn enter( + container: &Container, sync: (i32, i32), - mut f: impl FnMut() -> Result<(), Error>, -) -> Result<(), Error> { - // Close unused write end - close(sync.1)?; - // Got EOF, continue - read(sync.0, &mut [0u8; 1])?; + mut f: impl FnMut() -> Result<(), E>, +) -> Result<(), ContainerError> +where + E: std::error::Error + 'static, +{ + // Ensure process is cleaned up if parent dies + set_pdeathsig(Signal::SIGKILL)?; + + // Wait for 
continue message + let mut message = [0u8; 1]; + read(sync.0, &mut message)?; + assert_eq!(message[0], Message::Continue as u8); + + // Close unused read end close(sync.0)?; - setup(root)?; + setup(container)?; - f() + f().map_err(|e| ContainerError::Run(Box::new(e))) } -fn setup(root: &Path) -> Result<(), Error> { - // TODO: conditional networking - setup_networking(root)?; +fn setup(container: &Container) -> Result<(), ContainerError> { + if container.networking { + setup_networking(&container.root)?; + } - pivot(root)?; + pivot(&container.root, &container.binds)?; setup_root_user()?; - sethostname("boulder")?; + + if let Some(hostname) = &container.hostname { + sethostname(hostname)?; + } + + if let Some(dir) = &container.work_dir { + set_current_dir(dir)?; + } Ok(()) } -fn pivot(root: &Path) -> Result<(), Error> { +fn pivot(root: &Path, binds: &[Bind]) -> Result<(), ContainerError> { const OLD_PATH: &str = "old_root"; let old_root = root.join(OLD_PATH); @@ -86,6 +243,23 @@ fn pivot(root: &Path) -> Result<(), Error> { add_mount(None, "/", None, MsFlags::MS_REC | MsFlags::MS_PRIVATE)?; add_mount(Some(root), root, None, MsFlags::MS_BIND)?; + for bind in binds { + let source = bind.source.canonicalize()?; + let target = root.join(bind.target.strip_prefix("/").unwrap_or(&bind.target)); + + add_mount(Some(&source), &target, None, MsFlags::MS_BIND)?; + + // Remount to enforce readonly flag + if bind.read_only { + add_mount( + Some(source), + target, + None, + MsFlags::MS_BIND | MsFlags::MS_REMOUNT | MsFlags::MS_RDONLY, + )?; + } + } + enusure_directory(&old_root)?; pivot_root(root, &old_root)?; @@ -112,24 +286,25 @@ fn pivot(root: &Path) -> Result<(), Error> { Ok(()) } -fn setup_root_user() -> Result<(), Error> { - write("/etc/passwd", "root:x:0:0:root:/root:/bin/bash")?; - write("/etc/group", "root:x:0:")?; - enusure_directory("/root")?; +fn setup_root_user() -> Result<(), ContainerError> { + enusure_directory("/etc")?; + fs::write("/etc/passwd", 
"root:x:0:0:root::/bin/bash")?; + fs::write("/etc/group", "root:x:0:")?; + umask(Mode::S_IWGRP | Mode::S_IWOTH); Ok(()) } -fn setup_networking(root: &Path) -> Result<(), Error> { +fn setup_networking(root: &Path) -> Result<(), ContainerError> { enusure_directory(root.join("etc"))?; copy("/etc/resolv.conf", root.join("etc/resolv.conf"))?; copy("/etc/protocols", root.join("etc/protocols"))?; Ok(()) } -fn enusure_directory(path: impl AsRef) -> Result<(), Error> { +fn enusure_directory(path: impl AsRef) -> Result<(), ContainerError> { let path = path.as_ref(); if !path.exists() { - create_dir(path)?; + create_dir_all(path)?; } Ok(()) } @@ -139,7 +314,7 @@ fn add_mount>( target: T, fs_type: Option<&str>, flags: MsFlags, -) -> Result<(), Error> { +) -> Result<(), ContainerError> { enusure_directory(&target)?; mount( source.as_ref().map(AsRef::as_ref), @@ -150,3 +325,73 @@ fn add_mount>( )?; Ok(()) } + +fn ignore_sigint() -> Result<(), nix::Error> { + let action = SigAction::new(SigHandler::SigIgn, SaFlags::empty(), SigSet::empty()); + unsafe { sigaction(Signal::SIGINT, &action)? }; + Ok(()) +} + +fn default_sigint() -> Result<(), nix::Error> { + let action = SigAction::new(SigHandler::SigDfl, SaFlags::empty(), SigSet::empty()); + unsafe { sigaction(Signal::SIGINT, &action)? }; + Ok(()) +} + +/// Forwards `SIGINT` from the current process to the [`Pid`] process +pub fn forward_sigint(pid: Pid) -> Result<(), nix::Error> { + static PID: AtomicI32 = AtomicI32::new(0); + + PID.store(pid.as_raw(), Ordering::Relaxed); + + extern "C" fn on_int(_: i32) { + let pid = Pid::from_raw(PID.load(Ordering::Relaxed)); + let _ = kill(pid, Signal::SIGINT); + } + + let action = SigAction::new( + SigHandler::Handler(on_int), + SaFlags::empty(), + SigSet::empty(), + ); + unsafe { sigaction(Signal::SIGINT, &action)? 
}; + + Ok(()) +} + +struct Bind { + source: PathBuf, + target: PathBuf, + read_only: bool, +} + +#[derive(Debug, Error)] +pub enum Error { + #[error("exited with failure: {0}")] + Failure(String), + #[error("stopped by signal: {}", .0.as_str())] + Signaled(Signal), + #[error("unknown exit reason")] + UnknownExit, + #[error("error setting up rootless id map")] + Idmap(#[from] idmap::Error), + #[error("nix")] + Nix(#[from] nix::Error), + #[error("io")] + Io(#[from] io::Error), +} + +#[derive(Debug, Error)] +enum ContainerError { + #[error(transparent)] + Run(#[from] Box), + #[error(transparent)] + Nix(#[from] nix::Error), + #[error(transparent)] + Io(#[from] io::Error), +} + +#[repr(u8)] +enum Message { + Continue = 1, +} diff --git a/crates/moss/Cargo.toml b/crates/moss/Cargo.toml index bab60976..ab4ca831 100644 --- a/crates/moss/Cargo.toml +++ b/crates/moss/Cargo.toml @@ -4,8 +4,9 @@ version = "0.1.0" edition.workspace = true [dependencies] +config = { path = "../config" } dag = { path = "../dag" } -stone = { version = "0.1.0", path = "../stone" } +stone = { path = "../stone" } tui = { path = "../tui" } vfs = { path = "../vfs" } @@ -30,3 +31,4 @@ tokio-stream.workspace = true tokio-util.workspace = true thiserror.workspace = true url.workspace = true +xxhash-rust.workspace = true diff --git a/crates/moss/src/cli/info.rs b/crates/moss/src/cli/info.rs index a2677531..f3519ff5 100644 --- a/crates/moss/src/cli/info.rs +++ b/crates/moss/src/cli/info.rs @@ -9,6 +9,7 @@ use futures::StreamExt; use itertools::Itertools; use moss::{ client::{self, Client}, + environment, package::Flags, Package, Provider, }; @@ -34,7 +35,7 @@ pub async fn handle(args: &ArgMatches) -> Result<(), Error> { .collect::>(); let root = args.get_one::("root").unwrap().clone(); - let client = Client::new(root).await?; + let client = Client::new(environment::NAME, root).await?; for pkg in pkgs { let lookup = Provider::from_name(&pkg).unwrap(); diff --git a/crates/moss/src/cli/install.rs 
b/crates/moss/src/cli/install.rs index 2eac7b33..b67824d7 100644 --- a/crates/moss/src/cli/install.rs +++ b/crates/moss/src/cli/install.rs @@ -5,7 +5,7 @@ use std::path::{Path, PathBuf}; use clap::{arg, value_parser, ArgMatches, Command}; -use moss::client::Client; +use moss::{client::Client, environment}; pub use moss::client::install::Error; @@ -36,7 +36,7 @@ pub async fn handle(args: &ArgMatches, root: &Path) -> Result<(), Error> { let yes = *args.get_one::("yes").unwrap(); // Grab a client for the root - let mut client = Client::new(root).await?; + let mut client = Client::new(environment::NAME, root).await?; // Make ephemeral if a blit target was provided if let Some(blit_target) = args.get_one::("to").cloned() { diff --git a/crates/moss/src/cli/list.rs b/crates/moss/src/cli/list.rs index 0baee68a..cd343450 100644 --- a/crates/moss/src/cli/list.rs +++ b/crates/moss/src/cli/list.rs @@ -11,6 +11,7 @@ use thiserror::Error; use moss::{ client::{self, Client}, + environment, package::Flags, }; use tui::Stylize; @@ -63,7 +64,7 @@ pub async fn handle(args: &ArgMatches) -> Result<(), Error> { }; // Grab a client for the target, enumerate packages - let client = Client::new(root).await?; + let client = Client::new(environment::NAME, root).await?; let pkgs = client.registry.list(filter_flags).collect::>().await; let sync_available = if sync.is_some() { diff --git a/crates/moss/src/cli/remove.rs b/crates/moss/src/cli/remove.rs index e6192c0c..28399831 100644 --- a/crates/moss/src/cli/remove.rs +++ b/crates/moss/src/cli/remove.rs @@ -9,6 +9,7 @@ use futures::StreamExt; use itertools::{Either, Itertools}; use moss::{ client::{self, Client}, + environment, package::Flags, registry::transaction, state::Selection, @@ -34,7 +35,7 @@ pub async fn handle(args: &ArgMatches, root: &Path) -> Result<(), Error> { .collect::>(); // Grab a client for the target, enumerate packages - let client = Client::new(root).await?; + let client = Client::new(environment::NAME, root).await?; let 
installed = client .registry diff --git a/crates/moss/src/cli/repo.rs b/crates/moss/src/cli/repo.rs index 518cd405..68dba3ab 100644 --- a/crates/moss/src/cli/repo.rs +++ b/crates/moss/src/cli/repo.rs @@ -72,6 +72,8 @@ pub fn command() -> Command { /// Handle subcommands to `repo` pub async fn handle(args: &ArgMatches, root: &Path) -> Result<(), Error> { + let config = config::Manager::system(root, "moss"); + let handler = match args.subcommand() { Some(("add", cmd_args)) => Action::Add( root, @@ -92,18 +94,19 @@ pub async fn handle(args: &ArgMatches, root: &Path) -> Result<(), Error> { // dispatch to runtime handler function match handler { - Action::List(root) => list(root).await, + Action::List(root) => list(root, config).await, Action::Add(root, name, uri, comment, priority) => { - add(root, name, uri, comment, priority).await + add(root, config, name, uri, comment, priority).await } Action::Remove(_, _) => unimplemented!(), - Action::Update(root, name) => update(root, name).await, + Action::Update(root, name) => update(root, config, name).await, } } // Actual implementation of moss repo add, asynchronous async fn add( root: &Path, + config: config::Manager, name: String, uri: Url, comment: String, @@ -111,7 +114,7 @@ async fn add( ) -> Result<(), Error> { let installation = Installation::open(root); - let mut manager = repository::Manager::new(installation).await?; + let mut manager = repository::Manager::system(config, installation).await?; manager .add_repository( @@ -130,9 +133,9 @@ async fn add( } /// List the repositories and pretty print them -async fn list(root: &Path) -> Result<(), Error> { +async fn list(root: &Path, config: config::Manager) -> Result<(), Error> { let installation = Installation::open(root); - let manager = repository::Manager::new(installation).await?; + let manager = repository::Manager::system(config, installation).await?; let configured_repos = manager.list(); if configured_repos.len() == 0 { @@ -150,9 +153,9 @@ async fn list(root: 
&Path) -> Result<(), Error> { } /// Update specific repos or all -async fn update(root: &Path, which: Option) -> Result<(), Error> { +async fn update(root: &Path, config: config::Manager, which: Option) -> Result<(), Error> { let installation = Installation::open(root); - let mut manager = repository::Manager::new(installation).await?; + let mut manager = repository::Manager::system(config, installation).await?; match which { Some(repo) => manager.refresh(&repository::Id::new(repo)).await?, diff --git a/crates/moss/src/cli/state.rs b/crates/moss/src/cli/state.rs index c66e04fd..38bf1891 100644 --- a/crates/moss/src/cli/state.rs +++ b/crates/moss/src/cli/state.rs @@ -8,7 +8,7 @@ use clap::{arg, ArgAction, ArgMatches, Command}; use futures::{stream, StreamExt, TryFutureExt, TryStreamExt}; use moss::{ client::{self, prune, Client}, - state, + environment, state, }; use thiserror::Error; use tui::Stylize; @@ -39,7 +39,7 @@ pub async fn handle(args: &ArgMatches, root: &Path) -> Result<(), Error> { /// List all known states, newest first pub async fn list(root: &Path) -> Result<(), Error> { - let client = Client::new(root).await?; + let client = Client::new(environment::NAME, root).await?; let state_ids = client.state_db.list_ids().await?; @@ -56,7 +56,7 @@ pub async fn list(root: &Path) -> Result<(), Error> { pub async fn prune(args: &ArgMatches, root: &Path) -> Result<(), Error> { let keep = *args.get_one::("keep").unwrap(); - let client = Client::new(root).await?; + let client = Client::new(environment::NAME, root).await?; client.prune(prune::Strategy::KeepRecent(keep)).await?; Ok(()) diff --git a/crates/moss/src/cli/sync.rs b/crates/moss/src/cli/sync.rs index 8073c473..24a9c706 100644 --- a/crates/moss/src/cli/sync.rs +++ b/crates/moss/src/cli/sync.rs @@ -40,7 +40,7 @@ pub async fn handle(args: &ArgMatches, root: &Path) -> Result<(), Error> { let yes_all = *args.get_one::("yes").unwrap(); let upgrade_only = *args.get_one::("upgrade-only").unwrap(); - let mut client = 
Client::new(root).await?; + let mut client = Client::new(environment::NAME, root).await?; // Make ephemeral if a blit target was provided if let Some(blit_target) = args.get_one::("to").cloned() { diff --git a/crates/moss/src/client/mod.rs b/crates/moss/src/client/mod.rs index e2bd56d4..cf782203 100644 --- a/crates/moss/src/client/mod.rs +++ b/crates/moss/src/client/mod.rs @@ -40,6 +40,7 @@ pub mod prune; /// A Client is a connection to the underlying package management systems pub struct Client { + pub name: String, /// Root that we operate on pub installation: Installation, pub registry: Registry, @@ -48,35 +49,38 @@ pub struct Client { pub state_db: db::state::Database, pub layout_db: db::layout::Database, + config: config::Manager, repositories: repository::Manager, scope: Scope, } impl Client { /// Construct a new Client - pub async fn new(root: impl Into) -> Result { + pub async fn new( + client_name: impl ToString, + root: impl Into, + ) -> Result { let root = root.into(); if !root.exists() || !root.is_dir() { return Err(Error::RootInvalid); } + let config = config::Manager::system(&root, "moss"); let installation = Installation::open(root); - let repositories = repository::Manager::new(installation.clone()).await?; + let repositories = + repository::Manager::system(config.clone(), installation.clone()).await?; let install_db = db::meta::Database::new(installation.db_path("install"), installation.read_only()) .await?; let state_db = db::state::Database::new(&installation).await?; let layout_db = db::layout::Database::new(&installation).await?; - let state = match installation.active_state { - Some(id) => Some(state_db.get(&id).await?), - None => None, - }; - - let registry = build_registry(&repositories, &install_db, state).await?; + let registry = build_registry(&installation, &repositories, &install_db, &state_db).await?; Ok(Client { + name: client_name.to_string(), + config, installation, repositories, registry, @@ -87,11 +91,6 @@ impl Client { }) } - /// 
Construct a new Client for the global installation - pub async fn system() -> Result { - Client::new("/").await - } - pub fn is_ephemeral(&self) -> bool { matches!(self.scope, Scope::Ephemeral { .. }) } @@ -121,21 +120,47 @@ impl Client { }) } + /// Transition the client to use the provided explicit repositories, instead of loading + /// repository configuration from moss config folders + pub async fn explicit_repositories( + mut self, + repositories: repository::Map, + ) -> Result { + self.repositories = + repository::Manager::explicit(&self.name, repositories, self.installation.clone()) + .await?; + + // Rebuild registry + self.registry = build_registry( + &self.installation, + &self.repositories, + &self.install_db, + &self.state_db, + ) + .await?; + + Ok(self) + } + /// Reload all configured repositories and refreshes their index file, then update /// registry with all active repositories. pub async fn refresh_repositories(&mut self) -> Result<(), Error> { - // Reload manager and refresh all repositories - self.repositories = repository::Manager::new(self.installation.clone()).await?; - self.repositories.refresh_all().await?; - - // Refresh State DB - let state = match self.installation.active_state { - Some(id) => Some(self.state_db.get(&id).await?), - None => None, + // Reload manager if not explicit to pickup config changes + // then refresh indexes + if !self.repositories.is_explicit() { + self.repositories = + repository::Manager::system(self.config.clone(), self.installation.clone()).await? 
}; + self.repositories.refresh_all().await?; // Rebuild registry - self.registry = build_registry(&self.repositories, &self.install_db, state).await?; + self.registry = build_registry( + &self.installation, + &self.repositories, + &self.install_db, + &self.state_db, + ) + .await?; Ok(()) } @@ -628,6 +653,7 @@ BUG_REPORT_URL="https://github.com/serpent-os""#, Ok(()) } + enum Scope { Stateful, Ephemeral { blit_root: PathBuf }, @@ -708,10 +734,16 @@ impl From for PendingFile { } async fn build_registry( + installation: &Installation, repositories: &repository::Manager, installdb: &db::meta::Database, - state: Option, + statedb: &db::state::Database, ) -> Result { + let state = match installation.active_state { + Some(id) => Some(statedb.get(&id).await?), + None => None, + }; + let mut registry = Registry::default(); registry.add_plugin(Plugin::Cobble(plugin::Cobble::default())); diff --git a/crates/moss/src/environment.rs b/crates/moss/src/environment.rs index 4512deef..4e6cf03b 100644 --- a/crates/moss/src/environment.rs +++ b/crates/moss/src/environment.rs @@ -2,6 +2,7 @@ // // SPDX-License-Identifier: MPL-2.0 +pub const NAME: &str = env!("CARGO_PKG_NAME"); pub const VERSION: &str = env!("CARGO_PKG_VERSION"); /// Max concurrency for disk tasks pub const MAX_DISK_CONCURRENCY: usize = 16; diff --git a/crates/moss/src/lib.rs b/crates/moss/src/lib.rs index b8c02b7e..192d2a1e 100644 --- a/crates/moss/src/lib.rs +++ b/crates/moss/src/lib.rs @@ -6,7 +6,6 @@ #![allow(unused_variables, dead_code)] pub use self::client::Client; -pub use self::config::Config; pub use self::dependency::{Dependency, Provider}; pub use self::installation::Installation; pub use self::package::Package; @@ -15,7 +14,6 @@ pub use self::repository::Repository; pub use self::state::State; pub mod client; -pub mod config; pub mod db; pub mod dependency; pub mod environment; @@ -23,6 +21,6 @@ pub mod installation; pub mod package; pub mod registry; pub mod repository; -mod request; +pub mod request; pub 
mod state; pub mod stone; diff --git a/crates/moss/src/repository/manager.rs b/crates/moss/src/repository/manager.rs index 34dd60d1..6f8901b6 100644 --- a/crates/moss/src/repository/manager.rs +++ b/crates/moss/src/repository/manager.rs @@ -3,35 +3,88 @@ // SPDX-License-Identifier: MPL-2.0 use std::collections::HashMap; +use std::path::PathBuf; use futures::{future, StreamExt, TryStreamExt}; use thiserror::Error; use tokio::{fs, io}; +use xxhash_rust::xxh3::xxh3_64; use crate::db::meta; -use crate::{config, package, Installation}; use crate::{environment, stone}; +use crate::{package, Installation}; use crate::repository::{self, Repository}; +enum Source { + System(config::Manager), + Explicit { + identifier: String, + repos: repository::Map, + }, +} + +impl Source { + fn identifier(&self) -> &str { + match self { + Source::System(_) => environment::NAME, + Source::Explicit { identifier, .. } => identifier, + } + } +} + /// Manage a bunch of repositories pub struct Manager { + source: Source, installation: Installation, repositories: HashMap, } impl Manager { - /// Create a [`Manager`] for the supplied [`Installation`] - pub async fn new(installation: Installation) -> Result { - // Load all configs, default if none exist - let configs = config::load::(&installation.root) - .await - .unwrap_or_default(); + pub fn is_explicit(&self) -> bool { + matches!(self.source, Source::Explicit { .. 
}) + } + + /// Create a [`Manager`] for the supplied [`Installation`] using system configurations + pub async fn system( + config: config::Manager, + installation: Installation, + ) -> Result { + Self::new(Source::System(config), installation).await + } + + /// Create a [`Manager`] for the supplied [`Installation`] using the provided configurations + /// + /// [`Manager`] can't be used to `add` new repos in this mode + pub async fn explicit( + identifier: impl ToString, + repos: repository::Map, + installation: Installation, + ) -> Result { + Self::new( + Source::Explicit { + identifier: identifier.to_string(), + repos, + }, + installation, + ) + .await + } + + async fn new(source: Source, installation: Installation) -> Result { + let configs = match &source { + Source::System(config) => + // Load all configs, default if none exist + { + config.load::().await.unwrap_or_default() + } + Source::Explicit { repos, .. } => repos.clone(), + }; // Open all repo meta dbs and collect into hash map let repositories = future::try_join_all(configs.into_iter().map(|(id, repository)| async { - let db = open_meta_db(&id, &installation).await?; + let db = open_meta_db(source.identifier(), &repository, &installation).await?; Ok::<_, Error>((id.clone(), repository::Active { id, repository, db })) })) @@ -40,6 +93,7 @@ impl Manager { .collect(); Ok(Self { + source, installation, repositories, }) @@ -51,18 +105,20 @@ impl Manager { id: repository::Id, repository: Repository, ) -> Result<(), Error> { + let Source::System(config) = &self.source else { + return Err(Error::ExplicitUnsupported); + }; + // Save repo as new config file // We save it as a map for easy merging across // multiple configuration files { let map = repository::Map::with([(id.clone(), repository.clone())]); - config::save(&self.installation.root, &id, &map) - .await - .map_err(Error::SaveConfig)?; + config.save(&id, &map).await.map_err(Error::SaveConfig)?; } - let db = open_meta_db(&id, &self.installation).await?; + 
let db = open_meta_db(self.source.identifier(), &repository, &self.installation).await?; self.repositories .insert(id.clone(), repository::Active { id, repository, db }); @@ -70,25 +126,14 @@ impl Manager { Ok(()) } - /// Remove a [`Repository`] - pub async fn remove_repository(&mut self, id: repository::Id) -> Result<(), Error> { - self.repositories.remove(&id); - - let path = self.installation.repo_path(id.to_string()); - - fs::remove_dir_all(path).await.map_err(Error::RemoveDir)?; - - Ok(()) - } - /// Refresh all [`Repository`]'s by fetching it's latest index /// file and updating it's associated meta database pub async fn refresh_all(&mut self) -> Result<(), Error> { // Fetch index file + add to meta_db future::try_join_all( - self.repositories - .iter() - .map(|(id, state)| refresh_index(id, state, &self.installation)), + self.repositories.iter().map(|(id, state)| { + refresh_index(self.source.identifier(), state, &self.installation) + }), ) .await?; @@ -98,7 +143,7 @@ impl Manager { /// Refresh a [`Repository`] by Id pub async fn refresh(&mut self, id: &repository::Id) -> Result<(), Error> { if let Some(repo) = self.repositories.get(id) { - refresh_index(id, repo, &self.installation).await + refresh_index(self.source.identifier(), repo, &self.installation).await } else { Err(Error::UnknownRepo(id.clone())) } @@ -117,13 +162,23 @@ impl Manager { } } +/// Directory for the repo cached data (db & stone index), hashed by identifier & repo URI +fn cache_dir(identifier: &str, repo: &Repository, installation: &Installation) -> PathBuf { + let hash = format!( + "{:02x}", + xxh3_64(format!("{}-{}", identifier, repo.uri).as_bytes()) + ); + installation.repo_path(hash) +} + /// Open the meta db file, ensuring it's /// directory exists async fn open_meta_db( - id: &repository::Id, + identifier: &str, + repo: &Repository, installation: &Installation, ) -> Result { - let dir = installation.repo_path(id.to_string()); + let dir = cache_dir(identifier, repo, installation); 
fs::create_dir_all(&dir).await.map_err(Error::CreateDir)?; @@ -136,11 +191,11 @@ async fn open_meta_db( /// saves it to the repo installation path, then /// loads it's metadata into the meta db async fn refresh_index( - id: &repository::Id, + identifier: &str, state: &repository::Active, installation: &Installation, ) -> Result<(), Error> { - let out_dir = installation.repo_path(id.to_string()); + let out_dir = cache_dir(identifier, &state.repository, installation); fs::create_dir_all(&out_dir) .await @@ -203,6 +258,8 @@ async fn refresh_index( #[derive(Debug, Error)] pub enum Error { + #[error("Can't add repos when using explicit configs")] + ExplicitUnsupported, #[error("Missing metadata field: {0:?}")] MissingMetaField(stone::payload::meta::Tag), #[error("create directory")] diff --git a/crates/moss/src/repository/mod.rs b/crates/moss/src/repository/mod.rs index da26ae93..8502b8c7 100644 --- a/crates/moss/src/repository/mod.rs +++ b/crates/moss/src/repository/mod.rs @@ -4,6 +4,7 @@ use std::{collections::HashMap, fmt, path::Path}; +use config::Config; use futures::StreamExt; use serde::{Deserialize, Serialize}; use thiserror::Error; @@ -13,7 +14,7 @@ use tokio::{ }; use url::Url; -use crate::{db::meta, request, Config}; +use crate::{db::meta, request}; pub use self::manager::Manager; @@ -92,7 +93,7 @@ impl Ord for Priority { } /// A map of repositories -#[derive(Debug, Default, Serialize, Deserialize)] +#[derive(Debug, Clone, Default, Serialize, Deserialize)] pub struct Map(HashMap); impl Map { @@ -107,6 +108,10 @@ impl Map { pub fn add(&mut self, id: Id, repo: Repository) { self.0.insert(id, repo); } + + pub fn iter(&self) -> impl Iterator { + self.0.iter() + } } impl IntoIterator for Map { diff --git a/crates/stone_recipe/Cargo.toml b/crates/stone_recipe/Cargo.toml index 3fc92ee8..0b8b5f2f 100644 --- a/crates/stone_recipe/Cargo.toml +++ b/crates/stone_recipe/Cargo.toml @@ -6,7 +6,9 @@ edition.workspace = true # See more keys and their definitions at 
https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +nom.workspace = true serde.workspace = true serde_yaml.workspace = true +strum.workspace = true thiserror.workspace = true url.workspace = true diff --git a/crates/stone_recipe/src/lib.rs b/crates/stone_recipe/src/lib.rs index fd0e56e4..dee8d601 100644 --- a/crates/stone_recipe/src/lib.rs +++ b/crates/stone_recipe/src/lib.rs @@ -8,7 +8,17 @@ use serde::Deserialize; use thiserror::Error; use url::Url; -pub fn from_slice(bytes: &[u8]) -> Result { +pub use serde_yaml::Error; + +pub use self::macros::Macros; +pub use self::script::Script; +pub use self::tuning::Tuning; + +pub mod macros; +pub mod script; +pub mod tuning; + +pub fn from_slice(bytes: &[u8]) -> Result { serde_yaml::from_slice(bytes) } @@ -36,7 +46,7 @@ pub struct Recipe { pub architectures: Vec, #[serde(default)] pub tuning: Vec>, - #[serde(default)] + #[serde(default, deserialize_with = "stringy_bool")] pub emul32: bool, } @@ -49,6 +59,7 @@ pub struct KeyValue { #[derive(Debug, Clone, Deserialize)] pub struct Source { pub name: String, + #[serde(deserialize_with = "force_string")] pub version: String, pub release: u64, pub homepage: String, @@ -73,14 +84,14 @@ pub struct Build { #[derive(Debug, Clone, Deserialize)] pub struct Options { #[serde(default)] - pub toolchain: Toolchain, - #[serde(default)] + pub toolchain: tuning::Toolchain, + #[serde(default, deserialize_with = "stringy_bool")] pub cspgo: bool, - #[serde(default)] + #[serde(default, deserialize_with = "stringy_bool")] pub samplepgo: bool, - #[serde(default = "default_true")] + #[serde(default = "default_true", deserialize_with = "stringy_bool")] pub strip: bool, - #[serde(default)] + #[serde(default, deserialize_with = "stringy_bool")] pub networking: bool, } @@ -90,14 +101,8 @@ pub struct Package { pub description: Option, #[serde(default, rename = "rundeps")] pub run_deps: Vec, -} - -#[derive(Debug, Clone, Copy, Default, Deserialize)] -#[serde(rename_all = "lowercase")] 
-pub enum Toolchain { - #[default] - Llvm, - Gnu, + #[serde(default)] + pub paths: Vec, } #[derive(Debug, Clone)] @@ -106,9 +111,9 @@ pub enum Upstream { uri: Url, hash: String, rename: Option, - strip_dirs: u8, + strip_dirs: Option, unpack: bool, - unpack_dir: PathBuf, + unpack_dir: Option, }, Git { uri: Url, @@ -123,29 +128,25 @@ impl<'de> Deserialize<'de> for Upstream { where D: serde::Deserializer<'de>, { - fn default_unpack_dir() -> PathBuf { - ".".into() - } - #[derive(Debug, Deserialize)] #[serde(untagged)] enum Inner { Plain { hash: String, rename: Option, - #[serde(default, rename = "stripdirs")] - strip_dirs: u8, - #[serde(default = "default_true")] + #[serde(rename = "stripdirs")] + strip_dirs: Option, + #[serde(default = "default_true", deserialize_with = "stringy_bool")] unpack: bool, - #[serde(default = "default_unpack_dir", rename = "unpackdir")] - unpack_dir: PathBuf, + #[serde(rename = "unpackdir")] + unpack_dir: Option, }, Git { #[serde(rename = "ref")] ref_id: String, #[serde(rename = "clonedir")] clone_dir: Option, - #[serde(default = "default_true")] + #[serde(default = "default_true", deserialize_with = "stringy_bool")] staging: bool, }, } @@ -186,9 +187,9 @@ impl<'de> Deserialize<'de> for Upstream { uri, hash, rename: None, - strip_dirs: 0, + strip_dirs: None, unpack: default_true(), - unpack_dir: default_unpack_dir(), + unpack_dir: None, }), Some((Uri::Git(uri), Outer::String(ref_id))) => Ok(Upstream::Git { uri, @@ -239,13 +240,12 @@ impl<'de> Deserialize<'de> for Upstream { } #[derive(Debug, Clone)] -pub enum Tuning { - Enable, - Disable, - Config(String), +pub struct Path { + pub path: PathBuf, + pub kind: PathKind, } -impl<'de> Deserialize<'de> for KeyValue { +impl<'de> Deserialize<'de> for Path { fn deserialize(deserializer: D) -> Result where D: serde::Deserializer<'de>, @@ -253,42 +253,37 @@ impl<'de> Deserialize<'de> for KeyValue { #[derive(Debug, Deserialize)] #[serde(untagged)] enum Inner { - Bool(bool), - Config(String), - } - - 
#[derive(Debug, Deserialize)] - #[serde(untagged)] - enum Outer { - Key(String), - KeyValue(HashMap), + String(PathBuf), + KeyValue(HashMap), } - match Outer::deserialize(deserializer)? { - Outer::Key(key) => Ok(KeyValue { - key, - value: Tuning::Enable, + match Inner::deserialize(deserializer)? { + Inner::String(path) => Ok(Path { + path, + kind: PathKind::default(), }), - Outer::KeyValue(map) => match map.into_iter().next() { - Some((key, Inner::Bool(true))) => Ok(KeyValue { - key, - value: Tuning::Enable, - }), - Some((key, Inner::Bool(false))) => Ok(KeyValue { - key, - value: Tuning::Disable, - }), - Some((key, Inner::Config(config))) => Ok(KeyValue { - key, - value: Tuning::Config(config), - }), - // unreachable? - None => Err(serde::de::Error::custom("missing tuning entry")), - }, + Inner::KeyValue(map) => { + if let Some((path, kind)) = map.into_iter().next() { + Ok(Path { path, kind }) + } else { + Err(serde::de::Error::custom("missing path entry")) + } + } } } } +#[derive(Debug, Clone, Copy, Deserialize, strum::EnumString, Default)] +#[serde(try_from = "&str")] +#[strum(serialize_all = "lowercase")] +pub enum PathKind { + #[default] + Any, + Exe, + Symlink, + Special, +} + fn default_true() -> bool { true } @@ -331,6 +326,50 @@ where })) } +fn stringy_bool<'de, D>(deserializer: D) -> Result +where + D: serde::de::Deserializer<'de>, +{ + #[derive(Deserialize)] + #[serde(untagged)] + enum Inner { + Bool(bool), + String(String), + } + + match Inner::deserialize(deserializer)? { + Inner::Bool(bool) => Ok(bool), + // Really yaml... 
+ Inner::String(s) => match s.as_str() { + "y" | "Y" | "yes" | "Yes" | "YES" | "true" | "True" | "TRUE" | "on" | "On" | "ON" => { + Ok(true) + } + "n" | "N" | "no" | "No" | "NO" | "false" | "False" | "FALSE" | "off" | "Off" + | "OFF" => Ok(false), + _ => Err(serde::de::Error::custom( + "invalid boolean: expected true or false", + )), + }, + } +} + +fn force_string<'de, D>(deserializer: D) -> Result +where + D: serde::de::Deserializer<'de>, +{ + #[derive(Deserialize)] + #[serde(untagged)] + enum Inner { + String(String), + Number(serde_yaml::Number), + } + + match Inner::deserialize(deserializer)? { + Inner::String(s) => Ok(s), + Inner::Number(n) => Ok(n.to_string()), + } +} + #[cfg(test)] mod test { use super::*; diff --git a/crates/stone_recipe/src/macros.rs b/crates/stone_recipe/src/macros.rs new file mode 100644 index 00000000..94536c22 --- /dev/null +++ b/crates/stone_recipe/src/macros.rs @@ -0,0 +1,58 @@ +// SPDX-FileCopyrightText: Copyright © 2020-2023 Serpent OS Developers +// +// SPDX-License-Identifier: MPL-2.0 + +use serde::Deserialize; + +use crate::{ + sequence_of_key_value, + tuning::{TuningFlag, TuningGroup}, + Error, KeyValue, Package, +}; + +pub fn from_slice(bytes: &[u8]) -> Result { + serde_yaml::from_slice(bytes) +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Macros { + #[serde(default, deserialize_with = "sequence_of_key_value")] + pub actions: Vec>, + #[serde(default, deserialize_with = "sequence_of_key_value")] + pub definitions: Vec>, + #[serde(default, deserialize_with = "sequence_of_key_value")] + pub flags: Vec>, + #[serde(default, deserialize_with = "sequence_of_key_value")] + pub tuning: Vec>, + #[serde(default, deserialize_with = "sequence_of_key_value")] + pub packages: Vec>, + #[serde(default)] + pub default_tuning_groups: Vec, +} + +#[derive(Debug, Clone, Deserialize)] +pub struct Action { + pub command: String, + #[serde(default)] + pub dependencies: Vec, +} + +#[cfg(test)] +mod test { + 
use super::*; + + #[test] + fn deserialize() { + let inputs = [ + &include_bytes!("../../../test/base.yml")[..], + &include_bytes!("../../../test/x86_64.yml")[..], + &include_bytes!("../../../test/cmake.yml")[..], + ]; + + for input in inputs { + let recipe = from_slice(input).unwrap(); + dbg!(&recipe); + } + } +} diff --git a/crates/stone_recipe/src/script.rs b/crates/stone_recipe/src/script.rs new file mode 100644 index 00000000..9877ec70 --- /dev/null +++ b/crates/stone_recipe/src/script.rs @@ -0,0 +1,192 @@ +// SPDX-FileCopyrightText: Copyright © 2020-2023 Serpent OS Developers +// +// SPDX-License-Identifier: MPL-2.0 +#![allow(clippy::map_collect_result_unit)] + +use std::collections::{HashMap, HashSet}; + +use nom::{ + branch::alt, + bytes::complete::tag, + character::complete::{alpha1, anychar, char, digit1}, + combinator::{eof, iterator, map, peek, recognize, value}, + multi::{many1, many_till}, + sequence::{delimited, preceded, terminated}, +}; +use thiserror::Error; + +use crate::{macros::Action, Macros}; + +#[derive(Default)] +pub struct Parser { + actions: HashMap, + definitions: HashMap, +} + +impl Parser { + pub fn new() -> Self { + Self::default() + } + + pub fn add_action(&mut self, identifier: impl ToString, action: Action) { + self.actions.insert(identifier.to_string(), action); + } + + pub fn add_definition(&mut self, identifier: impl ToString, definition: impl ToString) { + self.definitions + .insert(identifier.to_string(), definition.to_string()); + } + + pub fn add_macros(&mut self, macros: Macros) { + macros.actions.into_iter().for_each(|kv| { + self.add_action(kv.key, kv.value); + }); + macros.definitions.into_iter().for_each(|kv| { + self.add_definition(kv.key, kv.value); + }); + } + + pub fn parse(&self, input: &str) -> Result { + parse(input, &self.actions, &self.definitions) + } +} + +#[derive(Debug)] +pub struct Script { + pub content: String, + pub dependencies: Vec, +} + +fn parse( + input: &str, + actions: &HashMap, + definitions: 
&HashMap, +) -> Result { + let mut content = String::new(); + let mut dependencies = HashSet::new(); + + tokens(input, |token| { + match token { + Token::Action(identifier) => { + let action = actions + .get(identifier) + .ok_or(Error::UnknownAction(identifier.to_string()))?; + dependencies.extend(action.dependencies.clone()); + + let script = parse(&action.command, actions, definitions)?; + + content.push_str(&script.content); + dependencies.extend(script.dependencies); + } + Token::Definition(identifier) => { + let definition = definitions + .get(identifier) + .ok_or(Error::UnknownDefinition(identifier.to_string()))?; + + let script = parse(definition, actions, definitions)?; + + content.push_str(&script.content); + dependencies.extend(script.dependencies); + } + Token::Plain(plain) => content.push_str(plain), + } + Ok(()) + })?; + + Ok(Script { + content: content.trim().to_string(), + dependencies: dependencies.into_iter().collect(), + }) +} + +#[derive(Debug)] +enum Token<'a> { + Action(&'a str), + Definition(&'a str), + Plain(&'a str), +} + +fn tokens(input: &str, f: impl FnMut(Token) -> Result<(), Error>) -> Result<(), Error> { + // A-Za-z0-9_ + let identifier = |input| recognize(many1(alt((alpha1, digit1, tag("_")))))(input); + // %identifier + let action = |input| preceded(char('%'), identifier)(input); + // %(identifier) + let definition = + |input| preceded(char('%'), delimited(char('('), identifier, char(')')))(input); + // action or definition + let macro_ = alt((action, definition)); + // %% -> % + let escaped = |input| preceded(char('%'), value("%", char('%')))(input); + // Escaped or any char until escape, next macro or EOF + let plain = alt(( + escaped, + recognize(many_till(anychar, peek(alt((escaped, macro_))))), + recognize(terminated(many1(anychar), eof)), + )); + + let token = alt(( + map(action, Token::Action), + map(definition, Token::Definition), + map(plain, Token::Plain), + )); + + let mut iter = iterator(input, token); + + 
iter.map(f).collect::>()?; + + iter.finish().map_err(convert_error)?; + + Ok(()) +} + +fn convert_error( + err: nom::Err<(&str, nom::error::ErrorKind)>, +) -> nom::Err> { + err.to_owned().map(|(i, e)| nom::error::Error::new(i, e)) +} + +#[derive(Debug, Error)] +pub enum Error { + #[error("unknown action macro: %{0}")] + UnknownAction(String), + #[error("unknown definition macro: %({0})")] + UnknownDefinition(String), + #[error("parse script")] + Parser(#[from] nom::Err>), +} + +#[cfg(test)] +mod test { + use super::*; + use crate::macros::Action; + + #[test] + fn parse_script() { + let input = + "%patch %%escaped %{ %(pkgdir)/0001-deps-analysis-elves-In-absence-of-soname.-make-one-u.patch"; + + let mut parser = Parser::new(); + parser.add_action( + "patch", + Action { + command: "patch -v %(nested_flag)".into(), + dependencies: vec!["patch".into()], + }, + ); + + for (id, definition) in [ + ("nested_flag", "--args=%(nested_arg),b,c"), + ("nested_arg", "a"), + ("pkgdir", "%(root)/pkg"), + ("root", "/mason"), + ] { + parser.add_definition(id, definition); + } + + let script = parser.parse(input).unwrap(); + + assert_eq!(script.content, "patch -v --args=a,b,c %escaped %{ /mason/pkg/0001-deps-analysis-elves-In-absence-of-soname.-make-one-u.patch".to_string()); + assert_eq!(script.dependencies, vec!["patch".to_string()]) + } +} diff --git a/crates/stone_recipe/src/tuning.rs b/crates/stone_recipe/src/tuning.rs new file mode 100644 index 00000000..b7666591 --- /dev/null +++ b/crates/stone_recipe/src/tuning.rs @@ -0,0 +1,257 @@ +// SPDX-FileCopyrightText: Copyright © 2020-2023 Serpent OS Developers +// +// SPDX-License-Identifier: MPL-2.0 + +use std::collections::{HashMap, HashSet}; + +use serde::Deserialize; +use thiserror::Error; + +use crate::{sequence_of_key_value, single_as_sequence, KeyValue, Macros}; + +#[derive(Debug, Clone)] +pub enum Tuning { + Enable, + Disable, + Config(String), +} + +impl<'de> Deserialize<'de> for KeyValue { + fn deserialize(deserializer: D) 
-> Result + where + D: serde::Deserializer<'de>, + { + #[derive(Debug, Deserialize)] + #[serde(untagged)] + enum Inner { + Bool(bool), + Config(String), + } + + #[derive(Debug, Deserialize)] + #[serde(untagged)] + enum Outer { + Key(String), + KeyValue(HashMap), + } + + match Outer::deserialize(deserializer)? { + Outer::Key(key) => Ok(KeyValue { + key, + value: Tuning::Enable, + }), + Outer::KeyValue(map) => match map.into_iter().next() { + Some((key, Inner::Bool(true))) => Ok(KeyValue { + key, + value: Tuning::Enable, + }), + Some((key, Inner::Bool(false))) => Ok(KeyValue { + key, + value: Tuning::Disable, + }), + Some((key, Inner::Config(config))) => Ok(KeyValue { + key, + value: Tuning::Config(config), + }), + // unreachable? + None => Err(serde::de::Error::custom("missing tuning entry")), + }, + } + } +} + +#[derive(Debug, Clone, Deserialize)] +pub struct TuningFlag { + #[serde(flatten)] + root: CompilerFlags, + #[serde(default)] + gnu: CompilerFlags, + #[serde(default)] + llvm: CompilerFlags, +} + +impl TuningFlag { + pub fn get(&self, flag: CompilerFlag, toolchain: Toolchain) -> Option<&str> { + match toolchain { + Toolchain::Llvm => self.llvm.get(flag), + Toolchain::Gnu => self.gnu.get(flag), + } + .or_else(|| self.root.get(flag)) + } +} + +#[derive(Debug, Clone, Copy)] +pub enum CompilerFlag { + C, + Cxx, + D, + Ld, +} + +#[derive(Debug, Clone, Deserialize, Default)] +pub struct CompilerFlags { + c: Option, + cxx: Option, + d: Option, + ld: Option, +} + +impl CompilerFlags { + fn get(&self, flag: CompilerFlag) -> Option<&str> { + match flag { + CompilerFlag::C => self.c.as_deref(), + CompilerFlag::Cxx => self.cxx.as_deref(), + CompilerFlag::D => self.d.as_deref(), + CompilerFlag::Ld => self.ld.as_deref(), + } + } +} + +#[derive(Debug, Clone, Copy, Default, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum Toolchain { + #[default] + Llvm, + Gnu, +} + +#[derive(Debug, Clone, Deserialize)] +pub struct TuningOption { + #[serde(default, 
deserialize_with = "single_as_sequence")] + pub enabled: Vec, + #[serde(default, deserialize_with = "single_as_sequence")] + pub disabled: Vec, +} + +#[derive(Debug, Clone, Deserialize)] +pub struct TuningGroup { + #[serde(flatten, default)] + pub root: TuningOption, + pub default: Option, + #[serde( + default, + rename = "options", + deserialize_with = "sequence_of_key_value" + )] + pub choices: Vec>, +} + +#[derive(Debug, Default)] +pub struct Builder { + flags: HashMap, + groups: HashMap, + enabled: HashSet, + disabled: HashSet, + option_sets: HashMap, +} + +impl Builder { + pub fn new() -> Self { + Self::default() + } + + pub fn add_flag(&mut self, name: impl ToString, flag: TuningFlag) { + self.flags.insert(name.to_string(), flag); + } + + pub fn add_group(&mut self, name: impl ToString, group: TuningGroup) { + self.groups.insert(name.to_string(), group); + } + + pub fn add_macros(&mut self, macros: Macros) { + macros.flags.into_iter().for_each(|kv| { + self.add_flag(kv.key, kv.value); + }); + macros.tuning.into_iter().for_each(|kv| { + self.add_group(kv.key, kv.value); + }); + } + + pub fn enable(&mut self, name: impl ToString, config: Option) -> Result<(), Error> { + let name = name.to_string(); + + let group = self + .groups + .get(&name) + .ok_or_else(|| Error::UnknownGroup(name.clone()))?; + + self.enabled.insert(name.clone()); + self.disabled.remove(&name); + + if let Some(value) = config.or_else(|| group.default.clone()) { + if group.choices.iter().any(|kv| kv.key == value) { + self.option_sets.insert(name, value); + } else { + return Err(Error::UnknownGroupValue(value, name)); + } + } + + Ok(()) + } + + pub fn disable(&mut self, name: impl ToString) -> Result<(), Error> { + let name = name.to_string(); + + if !self.groups.contains_key(&name) { + return Err(Error::UnknownGroup(name)); + } + + self.disabled.insert(name.clone()); + self.enabled.remove(&name); + self.option_sets.remove(&name); + + Ok(()) + } + + pub fn build(&self) -> Result, Error> { + 
let mut enabled_flags = HashSet::new(); + let mut disabled_flags = HashSet::new(); + + for enabled in &self.enabled { + let Some(group) = self.groups.get(enabled) else { + continue; + }; + + let mut to = &group.root; + + if let Some(option) = self.option_sets.get(enabled) { + if let Some(choice) = group.choices.iter().find(|kv| &kv.key == option) { + to = &choice.value; + } + } + + enabled_flags.extend(to.enabled.clone()); + } + + for disabled in &self.disabled { + let Some(group) = self.groups.get(disabled) else { + continue; + }; + disabled_flags.extend(group.root.disabled.clone()); + } + + for flag in enabled_flags.iter().chain(&disabled_flags) { + if !self.flags.contains_key(flag) { + return Err(Error::UnknownFlag(flag.clone())); + } + } + + Ok(enabled_flags + .iter() + .chain(&disabled_flags) + .collect::>() + .into_iter() + .filter_map(|flag| self.flags.get(flag).cloned()) + .collect()) + } +} + +#[derive(Debug, Error)] +pub enum Error { + #[error("unknown flag {0}")] + UnknownFlag(String), + #[error("unknown group {0}")] + UnknownGroup(String), + #[error("unknown value {0} for group {1}")] + UnknownGroupValue(String, String), +} diff --git a/test/base.yml b/test/base.yml new file mode 100644 index 00000000..9970399b --- /dev/null +++ b/test/base.yml @@ -0,0 +1,588 @@ +# Provides core definitions which each profile may override + +definitions: + + # Basic variables required for packages to build correctly + - libsuffix : "" + - prefix : "/usr" + - bindir : "%(prefix)/bin" + - sbindir : "%(prefix)/sbin" + - includedir : "%(prefix)/include" + - datadir : "%(prefix)/share" + - localedir : "%(datadir)/locale" + - infodir : "%(datadir)/info" + - mandir : "%(datadir)/man" + - docdir : "%(datadir)/doc" + - vendordir : "%(datadir)/defaults" + - completionsdir : "%(datadir)/bash-completion/completions" + - tmpfilesdir : "%(prefix)/lib/tmpfiles.d" + - sysusersdir : "%(prefix)/lib/sysusers.d" + - udevrulesdir : "%(prefix)/lib/udev/rules.d" + - localstatedir : "/var" + - 
sharedstatedir : "%(localstatedir)/lib" + - runstatedir : "/run" + - sysconfdir : "/etc" + - libdir : "%(prefix)/lib%(libsuffix)" + - libexecdir : "%(libdir)/%(name)" + - builddir : "serpent_builddir" + + # The vendorID is encoded into the triplet, toolchain, builds, etc. + # It must match the triplet from bootstrap-scripts. + - vendorID : "serpent-linux" + + # Must be set for CC/CXX/CPP to work + - cc : "%(compiler_c)" + - cxx : "%(compiler_cxx)" + - objc : "%(compiler_objc)" + - objcxx : "%(compiler_objcxx)" + - cpp : "%(compiler_cpp)" + - objcpp : "%(compiler_objcpp)" + - objcxxcpp : "%(compiler_objcxxcpp)" + - ar : "%(compiler_ar)" + - ld : "%(compiler_ld)" + - objcopy : "%(compiler_objcopy)" + - nm : "%(compiler_nm)" + - ranlib : "%(compiler_ranlib)" + - strip : "%(compiler_strip)" + - path : "%(compiler_path)" + - ccachedir : "%(compiler_cache)" + - pkgconfigpath : "%(libdir)/pkgconfig:/usr/share/pkgconfig" + +actions : + + # scriptBase is merged to the top of all newly generated build scripts. 
+ - scriptBase : + command: | + #!/bin/sh + set -e + set -x + TERM="dumb"; export TERM + PKG_CONFIG_PATH="%(pkgconfigpath)"; export PKG_CONFIG_PATH + CFLAGS="%(cflags)"; export CFLAGS + CGO_CFLAGS="%(cflags)"; export CGO_CFLAGS + CXXFLAGS="%(cxxflags)"; export CXXFLAGS + CGO_CXXFLAGS="%(cxxflags)"; export CGO_CXXFLAGS + LDFLAGS="%(ldflags)"; export LDFLAGS + CGO_LDFLAGS="%(ldflags) -Wl,--no-gc-sections"; export CGO_LDFLAGS + CC="%(cc)"; export CC + CXX="%(cxx)"; export CXX + OBJC="%(objc)"; export OBJC + OBJCXX="%(objcxx)"; export OBJCXX + CPP="%(cpp)"; export CPP + OBJCPP="%(objcpp)"; export OBJCPP + OBJCXXCPP="%(objcxxcpp)"; export OBJCXXCPP + AR="%(ar)"; export AR + LD="%(ld)"; export LD + OBJCOPY="%(objcopy)"; export OBJCOPY + NM="%(nm)"; export NM + RANLIB="%(ranlib)"; export RANLIB + STRIP="%(strip)"; export STRIP + PATH="%(path)"; export PATH + CCACHE_DIR="%(ccachedir)"; export CCACHE_DIR; + test -z "$CCACHE_DIR" && unset CCACHE_DIR; + LANG="en_US.UTF-8"; export LANG + LC_ALL="en_US.UTF-8"; export LC_ALL + test -d "%(workdir)" || (echo "The work directory %(workdir) does not exist"; exit 1) + cd "%(workdir)" && echo "The work directory %%(workdir) is ${PWD}" + +defaultTuningGroups : + - asneeded + - avxwidth + - base + - bindnow + - debug + - fortify + - frame-pointer + - harden + - icf + - optimize + - relr + - symbolic + +tuning : + # A set of groups we can toggle from the "tune" key + + # Architecture flags should always be enabled + - architecture: + enabled: + - architecture + + # Base flags should almost always be enabled, but want to be able to disable + - base: + enabled: + - base + + - debug: + options: + - lines: + enabled: debug-lines + - std: + enabled: debug-std + default: std + + # Toggle frame-pointer + - frame-pointer: + enabled: no-omit-frame-pointer + disabled: omit-frame-pointer + + # Enable bindnow functionality + - bindnow: + enabled: bindnow + + # Enable symbolic + - symbolic: + options: + - all: + enabled: symbolic-all + - functions: + 
enabled: symbolic-functions + - nonweak: + enabled: symbolic-nonweak + default: functions + + # Enable fortify + - fortify: + enabled: fortify + + # Enable hardening + - harden: + options: + - none: + enabled: harden-none + - lvl1: + enabled: harden-lvl1 + - lvl2: + enabled: harden-lvl2 + disabled: harden-none + default: lvl1 + + # Enable optimisation per given levels + - optimize: + options: + - fast: + enabled: optimize-fast + - generic: + enabled: optimize-generic + - size: + enabled: + - optimize-size + - sections + - speed: + enabled: optimize-speed + default: generic + + # Enable LTO + - lto: + options: + - full: + enabled: lto-full + - thin: + enabled: lto-thin + default: full + + # Enable LTOextra. Requires the equivalent lto option + - ltoextra: + options: + - full: + enabled: ltoextra-full + - thin: + enabled: ltoextra-thin + default: full + + # Enable ICF + - icf: + options: + - safe: + enabled: icf-safe + - all: + enabled: icf-all + default: safe + + # Enable Ignore data address equality + - idae: + enabled: idae + + # Enable Polly + - polly: + enabled: polly + + # Enable section splitting + - sections: + enabled: sections + + # Toggle common + - common: + enabled: common + + # Enable math + - math: + enabled: math + + # Enable noplt + - noplt: + enabled: + - noplt + - bindnow + + # Enable nosemantic + - nosemantic: + enabled: nosemantic + + # Enable nodaed + - nodaed: + enabled: nodaed + + # Enable asneeded + - asneeded: + enabled: asneeded + + # Enable avxwidth + - avxwidth: + enabled: avxwidth-128 + + # Enable bolt + - bolt: + enabled: bolt + + # Enable runpath + - runpath: + enabled: runpath + + # Enable sse2avx + - sse2avx: + enabled: sse2avx + + # Enable pch-instantiate + - pch-instantiate: + enabled: pch-instantiate + + # Enable visibility + - visibility: + options: + - inline: + enabled: visibility-inline + - hidden: + enabled: visibility-hidden + default: inline + + # Enable relative-vtables + - relative-vtables: + enabled: relative-vtables + + 
# Enable relr + - relr: + enabled: relr + +flags : + + # Needs overriding with -march/mtune values. + - architecture: + c : "" + cxx : "" + ld : "" + + # Base flags, enabled by default + - base: + c : "-pipe -Wformat -Wformat-security -Wno-error -fPIC" + cxx : "-pipe -Wformat -Wformat-security -Wno-error -fPIC" + ld : "-Wl,-O2,--gc-sections" + + - omit-frame-pointer: + c : "-fomit-frame-pointer -momit-leaf-frame-pointer" + cxx : "-fomit-frame-pointer -momit-leaf-frame-pointer" + + - no-omit-frame-pointer: + c : "-fno-omit-frame-pointer -mno-omit-leaf-frame-pointer" + cxx : "-fno-omit-frame-pointer -mno-omit-leaf-frame-pointer" + + # Toggle bindnow (ON) + - bindnow: + ld : "-Wl,-z,relro,-z,now" + + # Toggle symbolic (ON symbolic-functions) + - symbolic-all: + ld : "-Wl,-Bsymbolic" + + - symbolic-functions: + ld : "-Wl,-Bsymbolic-functions" + + - symbolic-nonweak: + llvm: + ld : "-Wl,-Bsymbolic-non-weak-functions" + + # Toggle fortify (ON) + - fortify: + c : "-D_FORTIFY_SOURCE=2" + cxx : "-D_FORTIFY_SOURCE=2" + + # No hardening! 
+ - harden-none: + c : "-fno-stack-protector" + cxx : "-fno-stack-protector" + + # Hardening (ON harden-lvl1) + - harden-lvl1: + c : "-fstack-protector --param ssp-buffer-size=32" + cxx : "-fstack-protector --param ssp-buffer-size=32" + + - harden-lvl2: + llvm: + c : "-fstack-protector-strong -fstack-clash-protection -fPIE --param ssp-buffer-size=4" + cxx : "-fstack-protector-strong -fstack-clash-protection -fPIE --param ssp-buffer-size=4" + gnu: + c : "-fstack-protector-strong -fstack-clash-protection -fPIE --param ssp-buffer-size=4" + cxx : "-fstack-protector-strong -fstack-clash-protection -fPIE --param ssp-buffer-size=4" + + # Use section splitting, improves GC without lto only (OFF) + - sections: + c : "-ffunction-sections -fdata-sections" + cxx : "-ffunction-sections -fdata-sections" + + # Optimize without care for math issues + - optimize-fast: + c : "-Ofast" + cxx : "-Ofast" + + # Generic optimisation case (ON) + - optimize-generic: + c : "-O2" + cxx : "-O2" + + # Optimize for size (OFF) + - optimize-size: + c : "-Os" + cxx : "-Os" + + # Optimize for speed (OFF) + - optimize-speed: + c : "-O3" + cxx : "-O3" + + # Enable LTO optimisations (OFF) + - lto-full: + c : "-flto" + cxx : "-flto" + ld : "-flto" + + # Enable Thin-LTO optimisations (OFF) + - lto-thin: + llvm: + c : "-flto=thin" + cxx : "-flto=thin" + ld : "-flto=thin" + + # Enable LTOextra optimisations (OFF) + - ltoextra-full: + gnu: + c : "-fdevirtualize-at-ltrans" + cxx : "-fdevirtualize-at-ltrans" + llvm: + c : "-fwhole-program-vtables -fvirtual-function-elimination" + cxx : "-fwhole-program-vtables -fvirtual-function-elimination" + + # Enable Thin-LTOextra optimisations (OFF) + - ltoextra-thin: + llvm: + c : "-fwhole-program-vtables" + cxx : "-fwhole-program-vtables" + + # Enable ALL LLVM ICF optimisations (OFF) + - icf-all: + llvm: + ld : "-Wl,--icf=all" + + # Enable LLVM ICF optimisations (ON) + - icf-safe: + llvm: + ld : "-Wl,--icf=safe" + + # Ignore data address equality (OFF) + - idae: + 
llvm: + ld : "-Wl,--ignore-data-address-equality" + + # Enable LLVM polly optimisations (OFF) + - polly: + llvm: + c : "-Xclang -mllvm -Xclang -polly -Xclang -mllvm -Xclang -polly-vectorizer=stripmine" + cxx : "-Xclang -mllvm -Xclang -polly -Xclang -mllvm -Xclang -polly-vectorizer=stripmine" + + # Toggle options you want to use with llvm-bolt (OFF) + - bolt: + gnu: + c : "-fno-reorder-blocks-and-partition" + cxx : "-fno-reorder-blocks-and-partition" + ld : "-Wl,-q" + llvm: + ld : "-Wl,-q" + + # Toggle -fcommon (OFF) + - common: + c : "-fcommon" + cxx : "-fcommon" + + # Toggle debug-lines optimisations + - debug-lines: + llvm: + c : "-gline-tables-only -fasynchronous-unwind-tables" + cxx : "-gline-tables-only -fasynchronous-unwind-tables" + + # Toggle debug-std optimisations (ON) + - debug-std: + c : "-g -feliminate-unused-debug-types -fasynchronous-unwind-tables" + cxx : "-g -feliminate-unused-debug-types -fasynchronous-unwind-tables" + + # Toggle fast math (OFF) + - math: + gnu: + c : "-fno-math-errno -fno-trapping-math" + cxx : "-fno-math-errno -fno-trapping-math" + llvm: + c : "-fno-math-errno -fno-trapping-math -ffp-contract=fast -ffp-model=fast" + cxx : "-fno-math-errno -fno-trapping-math -ffp-contract=fast -ffp-model=fast" + + # Toggle noplt, requires bindnow (OFF) + - noplt: + c : "-fno-plt" + cxx : "-fno-plt" + + # Toggle -fno-semantic-interposition (OFF) + - nosemantic: + c : "-fno-semantic-interposition" + cxx : "-fno-semantic-interposition" + + # Toggle -fno-direct-access-external-data (OFF) + - nodaed: + llvm: + c : "-fno-direct-access-external-data" + cxx : "-fno-direct-access-external-data" + + # Prefer 128-bit vector width (ON) + - avxwidth-128: + c : "-mprefer-vector-width=128" + cxx : "-mprefer-vector-width=128" + + # Toggle -fpch-instantiate-templates (OFF) + - pch-instantiate: + llvm: + c : "-fpch-instantiate-templates" + cxx : "-fpch-instantiate-templates" + + # Toggle asneeded (ON) + - asneeded: + ld : "-Wl,--as-needed" + + # Toggle runpath 
(OFF) + - runpath: + ld : "-Wl,--enable-new-dtags" + + # Toggle sse2avx (OFF) + - sse2avx: + gnu: + c : "-msse2avx" + cxx : "-msse2avx" + + # Toggle visibility hidden (OFF) + - visibility-hidden: + c : "-fvisibility=hidden" + cxx : "-fvisibility-inlines-hidden -fvisibility=hidden" + + # Toggle visibility inlines hidden (OFF) + - visibility-inline: + cxx : "-fvisibility-inlines-hidden" + + # Enable relative vtables (OFF) + - relative-vtables: + llvm: + cxx : "-fexperimental-library -fexperimental-relative-c++-abi-vtables" + + # Toggle relr (ON) + - relr: + ld : "-Wl,-z,pack-relative-relocs" + +# Template packages +packages : + + # Main package + - "%(name)": + paths: + - "*" + + # Some documentation + - "%(name)-docs": + summary: "Documentation for %(name)" + description: | + Documentation files for the %(name) package + paths: + - /usr/share/gtk-doc + + # Main development subpackage + - "%(name)-devel": + summary: "Development files for %(name)" + description: | + Install this package if you intend to build software against + the %(name) package. + paths: + - /usr/include + - /usr/lib/*.a + - /usr/lib/cmake + - /usr/lib/lib*.so + - /usr/lib/pkgconfig + - /usr/share/aclocal + - /usr/share/man/man2 + - /usr/share/man/man3 + - /usr/share/man/man9 + - /usr/share/pkgconfig + rundeps: + - "%(name)" + + # Main dbginfo package + - "%(name)-dbginfo": + summary: "Debugging symbols for %(name)" + description: | + Install this package if you need debugging information + symbols + for the %(name) package. + paths: + - /usr/lib/debug + + # Template for a -libs sub-package which can be used by adding paths via the stone.yml file + - "%(name)-libs": + summary: "Library files for %(name)" + description: | + Library files for %(name), typically pulled in as a dependency of another package. 
+ + # 32-bit compat libraries + - "%(name)-32bit": + summary: "Provides 32-bit runtime libraries for %(name)" + description: | + Install this package if you need the 32-bit versions of the + %(name) package libraries. + paths: + - /usr/lib32 + - /usr/lib32/lib*.so.* + rundeps: + - "%(name)" + + # 32-bit development files + - "%(name)-32bit-devel": + summary: "Provides development files for %(name)-32bit" + description: | + Install this package if you need to build software against + the 32-bit version of %(name), %(name)-32bit. + paths: + - /usr/lib32/*.a + - /usr/lib32/cmake + - /usr/lib32/lib*.so + - /usr/lib32/pkgconfig + rundeps: + - "%(name)-32bit" + - "%(name)-devel" + + # 32-bit debug symbols + - "%(name)-32bit-dbginfo": + summary: "Debugging symbols for %(name)-32bit" + description: | + Install this package if you need debugging information + symbols + for the %(name)-32bit package. + paths: + - /usr/lib32/debug diff --git a/test/cmake.yml b/test/cmake.yml new file mode 100644 index 00000000..ca58ca5f --- /dev/null +++ b/test/cmake.yml @@ -0,0 +1,44 @@ +actions: + + # Perform cmake with the default options in a subdirectory + - cmake: + command: | + cmake %(options_cmake) + dependencies: + - cmake + + # Perform cmake with unity build enabled + - cmake_unity: + command: | + cmake -DCMAKE_UNITY_BUILD=ON %(options_cmake) + dependencies: + - cmake + + # Build the cmake project + - cmake_build: + command: | + ninja -v -j "%(jobs)" -C "%(builddir)" + dependencies: + - ninja + + # Install results of the build to the destination directory + - cmake_install: + command: | + DESTDIR="%(installroot)" ninja install -v -j "%(jobs)" -C "%(builddir)" + dependencies: + - ninja + +definitions: + + # Default cmake options as passed to cmake + - options_cmake: | + -G Ninja -S . 
-B "%(builddir)" \ + -DCMAKE_C_FLAGS="${CFLAGS}" \ + -DCMAKE_CXX_FLAGS="${CXXFLAGS}" \ + -DCMAKE_C_FLAGS_RELEASE="" \ + -DCMAKE_CXX_FLAGS_RELEASE="" \ + -DCMAKE_LD_FLAGS="${LDFLAGS}" \ + -DCMAKE_BUILD_TYPE="Release" \ + -DCMAKE_INSTALL_LIBDIR="lib" \ + -DCMAKE_INSTALL_PREFIX="%(prefix)" \ + -DCMAKE_LIB_SUFFIX="%(libsuffix)" diff --git a/test/x86_64.yml b/test/x86_64.yml new file mode 100644 index 00000000..7e812f8b --- /dev/null +++ b/test/x86_64.yml @@ -0,0 +1,19 @@ +# Provides -m64 builds for x86_64 build-hosts + +definitions: + + - libsuffix : "" + - build_platform : x86_64-%(vendorID) + - host_platform : x86_64-%(vendorID) + - cc : "%(compiler_c)" + - cxx : "%(compiler_cxx)" + - cpp : "%(compiler_cpp)" + - march : x86-64-v2 + - mtune : ivybridge + +flags: + + # Set architecture flags + - architecture: + c : "-march=x86-64-v2 -mtune=ivybridge" + cxx : "-march=x86-64-v2 -mtune=ivybridge"