diff --git a/.github/actions/setup/action.yml b/.github/actions/setup/action.yml
new file mode 100644
index 000000000..eb95f9db3
--- /dev/null
+++ b/.github/actions/setup/action.yml
@@ -0,0 +1,18 @@
+name: setup
+
+inputs:
+  name:
+    required: true
+    type: string
+
+runs:
+  using: "composite"
+  steps:
+    - uses: actions/checkout@v4
+
+    - uses: actions/download-artifact@v4
+      with:
+        name: ${{ inputs.name }}
+
+    - run: tar xf ${{ inputs.name }}.tar
+      shell: bash
diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml
index 0c3812681..4d38c7f35 100644
--- a/.github/workflows/check.yml
+++ b/.github/workflows/check.yml
@@ -4,29 +4,24 @@ on:
   push:
     branches: [main, pre-release]
   pull_request:
-    branches: [main, pre-release]
 
 jobs:
   lint:
     runs-on: ubuntu-latest
     steps:
-      - name: checkout
-        uses: actions/checkout@v2
+      - uses: actions/checkout@v4
 
-      - name: setup-zig
-        uses: mlugg/setup-zig@v1
+      - uses: mlugg/setup-zig@v1
         with:
           version: 0.13.0
 
       - name: lint
-        run: |
-          zig fmt --check src/ build.zig
+        run: zig fmt --check src/ build.zig
 
   check_style:
     runs-on: ubuntu-latest
     steps:
-      - name: checkout
-        uses: actions/checkout@v3
+      - uses: actions/checkout@v4
 
       - name: Set up Python 3.10
         uses: actions/setup-python@v3
@@ -36,42 +31,80 @@ jobs:
       - name: check style
         run: python scripts/style.py --check src
 
+  build:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - uses: mlugg/setup-zig@v1
+        with:
+          version: 0.13.0
+
+      - name: build
+        run: |
+          zig build test -Denable-tsan=true -Dno-run -p output/tsan
+          zig build test -Denable-tsan=false -Dno-run -p output/no-tsan/
+          zig build test -Denable-tsan=true -Dblockstore=hashmap -Dfilter="ledger" -Dno-run -p output/hashmap
+
+      # In order not to lose the execution permissions of the binaries,
+      # we tar everything together before uploading.
+      - name: bundle
+        run: tar -cvf zig-build.tar output/
+
+      - uses: actions/upload-artifact@v4
+        with:
+          name: zig-build
+          path: zig-build.tar
+
+  # GitHub's CI runners for macOS seem to alternate between an x86 and an ARM machine,
+  # which is annoying to handle and leaves the virtual machines with invalid hardware info
+  # configurations. This leads to incorrect feature detection and makes the CI flaky.
+  #
+  # If we at some point set up a self-hosted macOS runner, we could work around the issue and
+  # enable full testing on macOS!
+  build-macos:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - uses: mlugg/setup-zig@v1
+        with:
+          version: 0.13.0
+
+      - name: build
+        run: zig build -Dtarget=aarch64-macos -Dcpu=apple_m3
+
   test:
     strategy:
       matrix:
         os: [ubuntu-latest]
     runs-on: ${{matrix.os}}
+    needs: build
     timeout-minutes: 60
     steps:
-      - name: checkout
-        uses: actions/checkout@v2
-        with:
-          submodules: recursive
-
-      - name: setup-zig
-        uses: mlugg/setup-zig@v1
+      - uses: actions/checkout@v4
+      - uses: ./.github/actions/setup
         with:
-          version: 0.13.0
+          name: zig-build
 
       - name: test
-        run: |
-          zig build test -Denable-tsan=true
-          zig build test -Denable-tsan=true -Dblockstore=hashmap -Dfilter=ledger
+        run: output/tsan/bin/test
+
+      - name: test-hashmap
+        run: output/hashmap/bin/test
 
   kcov_test:
     strategy:
       matrix:
         os: [ubuntu-latest]
     runs-on: ${{matrix.os}}
+    needs: build
    timeout-minutes: 60
     steps:
-      - name: checkout
-        uses: actions/checkout@v2
-
-      - name: setup-zig
-        uses: mlugg/setup-zig@v1
+      - uses: actions/checkout@v4
+      - uses: ./.github/actions/setup
         with:
-          version: 0.13.0
+          name: zig-build
 
       - name: Set up dependencies
         run: sudo apt-get update
@@ -88,31 +121,44 @@ jobs:
           sudo ln libbfd-2.42-system.so libbfd-2.38-system.so || echo libbfd not found
 
       - name: run kcov
-        run: |
-          bash scripts/kcov_test.sh
+        run: bash scripts/kcov_test.sh output/no-tsan/bin/test
 
       - name: print coverage report
-        run: |
-          python scripts/parse_kcov.py kcov-output/test/coverage.json
+        run: python scripts/parse_kcov.py kcov-output/test/coverage.json
+
+  build-release:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+
+      - uses: mlugg/setup-zig@v1
+        with:
+          version: 0.13.0
+
+      - name: build
+        run: zig build sig fuzz -Doptimize=ReleaseSafe -Dno-run
+
+      - name: bundle
+        run: tar -cvf zig-build-release.tar zig-out/
+
+      - uses: actions/upload-artifact@v4
+        with:
+          name: zig-build-release
+          path: zig-build-release.tar
 
   gossip:
     strategy:
       matrix:
         os: [ubuntu-latest]
     runs-on: ${{matrix.os}}
+    needs: build-release
     timeout-minutes: 60
     steps:
-      - name: checkout
-        uses: actions/checkout@v2
-        with:
-          submodules: recursive
-      - name: setup-zig
-        uses: mlugg/setup-zig@v1
+      - uses: actions/checkout@v4
+      - uses: ./.github/actions/setup
         with:
-          version: 0.13.0
+          name: zig-build-release
-      - name: build release
-        run: zig build -Doptimize=ReleaseSafe
 
       - name: run gossip
         run: bash scripts/gossip_test.sh 120 # in seconds
@@ -122,17 +168,13 @@ jobs:
         os: [ubuntu-latest]
     runs-on: ${{matrix.os}}
     timeout-minutes: 60
+    needs: build-release
     steps:
-      - name: checkout
-        uses: actions/checkout@v2
-        with:
-          submodules: recursive
-      - name: setup zig
-        uses: mlugg/setup-zig@v1
+      - uses: actions/checkout@v4
+      - uses: ./.github/actions/setup
         with:
-          version: 0.13.0
-      - name: build
-        run: zig build -Doptimize=ReleaseSafe -Dno-run fuzz
+          name: zig-build-release
+      - name: run
         run: ./zig-out/bin/fuzz gossip_service 19 10000
@@ -141,18 +183,14 @@ jobs:
       matrix:
         os: [ubuntu-latest]
     runs-on: ${{matrix.os}}
+    needs: build-release
     timeout-minutes: 60
     steps:
-      - name: checkout
-        uses: actions/checkout@v2
+      - uses: actions/checkout@v4
+      - uses: ./.github/actions/setup
         with:
-          submodules: recursive
-      - name: setup zig
-        uses: mlugg/setup-zig@v1
-        with:
-          version: 0.13.0
-      - name: build
-        run: zig build -Doptimize=ReleaseSafe -Dno-run fuzz
+          name: zig-build-release
+      - name: run
         run: ./zig-out/bin/fuzz gossip_table 19 100000
@@ -161,18 +199,14 @@ jobs:
       matrix:
         os: [ubuntu-latest]
     runs-on: ${{matrix.os}}
+    needs: build-release
     timeout-minutes: 60
     steps:
-      - name: checkout
-        uses: actions/checkout@v2
-        with:
-          submodules: recursive
-      - name: setup zig
-        uses: mlugg/setup-zig@v1
+      - uses: actions/checkout@v4
+      - uses: ./.github/actions/setup
         with:
-          version: 0.13.0
-      - name: build
-        run: zig build -Doptimize=ReleaseSafe -Dno-run fuzz
+          name: zig-build-release
+      - name: run
         run: ./zig-out/bin/fuzz allocators 19 10000
@@ -181,18 +215,14 @@ jobs:
       matrix:
         os: [ubuntu-latest]
     runs-on: ${{matrix.os}}
+    needs: build-release
     timeout-minutes: 60
     steps:
-      - name: checkout
-        uses: actions/checkout@v2
-        with:
-          submodules: recursive
-      - name: setup zig
-        uses: mlugg/setup-zig@v1
+      - uses: actions/checkout@v4
+      - uses: ./.github/actions/setup
         with:
-          version: 0.13.0
-      - name: build
-        run: zig build -Doptimize=ReleaseSafe -Dno-run fuzz
+          name: zig-build-release
+      - name: run
         run: ./zig-out/bin/fuzz ledger 19 10000
diff --git a/build.zig b/build.zig
index ef8278362..0e61eb077 100644
--- a/build.zig
+++ b/build.zig
@@ -11,20 +11,29 @@ pub fn build(b: *Build) void {
     const optimize = b.standardOptimizeOption(.{});
     const filters = b.option([]const []const u8, "filter", "List of filters, used for example to filter unit tests by name"); // specified as a series like `-Dfilter="filter1" -Dfilter="filter2"`
     const enable_tsan = b.option(bool, "enable-tsan", "Enable TSan for the test suite");
-    const no_run = b.option(bool, "no-run", "Do not run the selected step and install it") orelse false;
     const blockstore_db = b.option(BlockstoreDB, "blockstore", "Blockstore database backend") orelse .rocksdb;
+    const no_run = b.option(bool, "no-run",
+        \\Don't run any of the executables implied by the specified steps, only install them.
+        \\Use in conjunction with 'no-bin' to avoid installation as well.
+    ) orelse false;
+    const no_bin = b.option(bool, "no-bin",
+        \\Don't install any of the binaries implied by the specified steps, only run them.
+        \\Use in conjunction with 'no-run' to avoid running as well.
+    ) orelse false;
 
     // Build options
     const build_options = b.addOptions();
     build_options.addOption(BlockstoreDB, "blockstore_db", blockstore_db);
 
     // CLI build steps
-    const sig_step = b.step("run", "Run the sig executable");
+    const install_step = b.getInstallStep();
+    const sig_step = b.step("sig", "Run the sig executable");
     const test_step = b.step("test", "Run library tests");
     const fuzz_step = b.step("fuzz", "Gossip fuzz testing");
     const benchmark_step = b.step("benchmark", "Benchmark client");
     const geyser_reader_step = b.step("geyser_reader", "Read data from geyser");
     const svm_step = b.step("svm", "Run the SVM client");
+    const docs_step = b.step("docs", "Generate and install documentation for the Sig Library");
 
     // Dependencies
     const dep_opts = .{ .target = target, .optimize = optimize };
@@ -63,6 +72,9 @@ pub fn build(b: *Build) void {
     const sig_mod = b.addModule("sig", .{
         .root_source_file = b.path("src/sig.zig"),
     });
+
+    sig_mod.addOptions("build-options", build_options);
+
     sig_mod.addImport("zig-network", zig_network_module);
     sig_mod.addImport("base58-zig", base58_module);
     sig_mod.addImport("zig-cli", zig_cli_module);
@@ -72,7 +84,6 @@ pub fn build(b: *Build) void {
         .rocksdb => sig_mod.addImport("rocksdb", rocksdb_mod),
         .hashmap => {},
     }
-    sig_mod.addOptions("build-options", build_options);
 
     // main executable
     const sig_exe = b.addExecutable(.{
@@ -82,13 +93,17 @@ pub fn build(b: *Build) void {
         .optimize = optimize,
         .sanitize_thread = enable_tsan,
     });
+    sig_step.dependOn(&sig_exe.step);
+    install_step.dependOn(&sig_exe.step);
 
     // make sure pyroscope's got enough info to profile
     sig_exe.build_id = .fast;
     sig_exe.root_module.omit_frame_pointer = false;
     sig_exe.root_module.strip = false;
-    b.installArtifact(sig_exe);
+    sig_exe.linkLibC();
+    sig_exe.root_module.addOptions("build-options", build_options);
+
     sig_exe.root_module.addImport("base58-zig", base58_module);
     sig_exe.root_module.addImport("httpz", httpz_mod);
     sig_exe.root_module.addImport("zig-cli", zig_cli_module);
@@ -101,39 +116,33 @@ pub fn build(b: *Build) void {
         .rocksdb => sig_exe.root_module.addImport("rocksdb", rocksdb_mod),
         .hashmap => {},
     }
-    sig_exe.root_module.addOptions("build-options", build_options);
-    sig_exe.linkLibC();
-
-    const main_exe_run = b.addRunArtifact(sig_exe);
-    main_exe_run.addArgs(b.args orelse &.{});
-    if (!no_run) sig_step.dependOn(&main_exe_run.step);
-    if (no_run) sig_step.dependOn(&b.addInstallArtifact(sig_exe, .{}).step);
 
-    // docs for the Sig library
-    const sig_obj = b.addObject(.{
-        .name = "sig",
-        .root_source_file = b.path("src/sig.zig"),
-        .target = target,
-        .optimize = .Debug,
-    });
+    if (!no_bin) {
+        const sig_install = b.addInstallArtifact(sig_exe, .{});
+        sig_step.dependOn(&sig_install.step);
+        install_step.dependOn(&sig_install.step);
+    }
 
-    const docs_step = b.step("docs", "Generate and install documentation for the Sig Library");
-    const install_sig_docs = b.addInstallDirectory(.{
-        .source_dir = sig_obj.getEmittedDocs(),
-        .install_dir = .prefix,
-        .install_subdir = "docs",
-    });
-    docs_step.dependOn(&install_sig_docs.step);
+    if (!no_run) {
+        const sig_run = b.addRunArtifact(sig_exe);
+        sig_step.dependOn(&sig_run.step);
+        sig_run.addArgs(b.args orelse &.{});
+    }
 
     // unit tests
     const unit_tests_exe = b.addTest(.{
         .root_source_file = b.path("src/tests.zig"),
         .target = target,
         .optimize = optimize,
-        .filters = filters orelse &.{},
         .sanitize_thread = enable_tsan,
+        .filters = filters orelse &.{},
     });
-    b.installArtifact(unit_tests_exe);
+    test_step.dependOn(&unit_tests_exe.step);
+    install_step.dependOn(&unit_tests_exe.step);
+
+    unit_tests_exe.linkLibC();
+    unit_tests_exe.root_module.addOptions("build-options", build_options);
+
     unit_tests_exe.root_module.addImport("base58-zig", base58_module);
     unit_tests_exe.root_module.addImport("httpz", httpz_mod);
     unit_tests_exe.root_module.addImport("zig-network", zig_network_module);
@@ -142,12 +151,17 @@ pub fn build(b: *Build) void {
         .rocksdb => unit_tests_exe.root_module.addImport("rocksdb", rocksdb_mod),
         .hashmap => {},
     }
-    unit_tests_exe.root_module.addOptions("build-options", build_options);
-    unit_tests_exe.linkLibC();
 
-    const unit_tests_exe_run = b.addRunArtifact(unit_tests_exe);
-    if (!no_run) test_step.dependOn(&unit_tests_exe_run.step);
-    if (no_run) test_step.dependOn(&b.addInstallArtifact(unit_tests_exe, .{}).step);
+    if (!no_bin) {
+        const unit_tests_install = b.addInstallArtifact(unit_tests_exe, .{});
+        test_step.dependOn(&unit_tests_install.step);
+        install_step.dependOn(&unit_tests_install.step);
+    }
+
+    if (!no_run) {
+        const unit_tests_run = b.addRunArtifact(unit_tests_exe);
+        test_step.dependOn(&unit_tests_run.step);
+    }
 
     // fuzz test
     const fuzz_exe = b.addExecutable(.{
@@ -157,22 +171,32 @@ pub fn build(b: *Build) void {
         .optimize = optimize,
         .sanitize_thread = enable_tsan,
     });
-    b.installArtifact(fuzz_exe);
+    fuzz_step.dependOn(&fuzz_exe.step);
+    install_step.dependOn(&fuzz_exe.step);
+
+    fuzz_exe.linkLibC();
+    fuzz_exe.root_module.addOptions("build-options", build_options);
+
     fuzz_exe.root_module.addImport("base58-zig", base58_module);
     fuzz_exe.root_module.addImport("zig-network", zig_network_module);
     fuzz_exe.root_module.addImport("httpz", httpz_mod);
     fuzz_exe.root_module.addImport("zstd", zstd_mod);
-    fuzz_exe.root_module.addOptions("build-options", build_options);
     switch (blockstore_db) {
         .rocksdb => fuzz_exe.root_module.addImport("rocksdb", rocksdb_mod),
         .hashmap => {},
     }
-    fuzz_exe.linkLibC();
 
-    const fuzz_exe_run = b.addRunArtifact(fuzz_exe);
-    fuzz_exe_run.addArgs(b.args orelse &.{});
-    if (!no_run) fuzz_step.dependOn(&fuzz_exe_run.step);
-    if (no_run) fuzz_step.dependOn(&b.addInstallArtifact(fuzz_exe, .{}).step);
+    if (!no_bin) {
+        const fuzz_install = b.addInstallArtifact(fuzz_exe, .{});
+        fuzz_step.dependOn(&fuzz_install.step);
+        install_step.dependOn(&fuzz_install.step);
+    }
+
+    if (!no_run) {
+        const fuzz_run = b.addRunArtifact(fuzz_exe);
+        fuzz_step.dependOn(&fuzz_run.step);
+        fuzz_run.addArgs(b.args orelse &.{});
+    }
 
     // benchmarks
     const benchmark_exe = b.addExecutable(.{
@@ -182,7 +206,12 @@ pub fn build(b: *Build) void {
         .optimize = optimize,
         .sanitize_thread = enable_tsan,
     });
-    b.installArtifact(benchmark_exe);
+    benchmark_step.dependOn(&benchmark_exe.step);
+    install_step.dependOn(&benchmark_exe.step);
+
+    benchmark_exe.linkLibC();
+    benchmark_exe.root_module.addOptions("build-options", build_options);
+
     benchmark_exe.root_module.addImport("base58-zig", base58_module);
     benchmark_exe.root_module.addImport("zig-network", zig_network_module);
     benchmark_exe.root_module.addImport("httpz", httpz_mod);
@@ -192,13 +221,18 @@ pub fn build(b: *Build) void {
         .rocksdb => benchmark_exe.root_module.addImport("rocksdb", rocksdb_mod),
         .hashmap => {},
     }
-    benchmark_exe.root_module.addOptions("build-options", build_options);
-    benchmark_exe.linkLibC();
 
-    const benchmark_exe_run = b.addRunArtifact(benchmark_exe);
-    benchmark_exe_run.addArgs(b.args orelse &.{});
-    if (!no_run) benchmark_step.dependOn(&benchmark_exe_run.step);
-    if (no_run) benchmark_step.dependOn(&b.addInstallArtifact(benchmark_exe, .{}).step);
+    if (!no_bin) {
+        const benchmark_install = b.addInstallArtifact(benchmark_exe, .{});
+        benchmark_step.dependOn(&benchmark_install.step);
+        install_step.dependOn(&benchmark_install.step);
+    }
+
+    if (!no_run) {
+        const benchmark_run = b.addRunArtifact(benchmark_exe);
+        benchmark_step.dependOn(&benchmark_run.step);
+        benchmark_run.addArgs(b.args orelse &.{});
+    }
 
     // geyser reader
     const geyser_reader_exe = b.addExecutable(.{
@@ -208,14 +242,23 @@ pub fn build(b: *Build) void {
         .optimize = optimize,
         .sanitize_thread = enable_tsan,
     });
-    b.installArtifact(geyser_reader_exe);
+    geyser_reader_step.dependOn(&geyser_reader_exe.step);
+    install_step.dependOn(&geyser_reader_exe.step);
+
     geyser_reader_exe.root_module.addImport("sig", sig_mod);
     geyser_reader_exe.root_module.addImport("zig-cli", zig_cli_module);
 
-    const geyser_reader_exe_run = b.addRunArtifact(geyser_reader_exe);
-    geyser_reader_exe_run.addArgs(b.args orelse &.{});
-    if (!no_run) geyser_reader_step.dependOn(&geyser_reader_exe_run.step);
-    if (no_run) geyser_reader_step.dependOn(&b.addInstallArtifact(geyser_reader_exe, .{}).step);
+    if (!no_bin) {
+        const geyser_reader_install = b.addInstallArtifact(geyser_reader_exe, .{});
+        geyser_reader_step.dependOn(&geyser_reader_install.step);
+        install_step.dependOn(&geyser_reader_install.step);
+    }
+
+    if (!no_run) {
+        const geyser_reader_run = b.addRunArtifact(geyser_reader_exe);
+        geyser_reader_step.dependOn(&geyser_reader_run.step);
+        geyser_reader_run.addArgs(b.args orelse &.{});
+    }
 
     const svm_exe = b.addExecutable(.{
         .name = "svm",
@@ -224,13 +267,30 @@ pub fn build(b: *Build) void {
         .optimize = optimize,
         .sanitize_thread = enable_tsan,
     });
-    b.installArtifact(svm_exe);
+    svm_step.dependOn(&svm_exe.step);
+    install_step.dependOn(&svm_exe.step);
+
     svm_exe.root_module.addImport("sig", sig_mod);
 
-    const svm_exe_run = b.addRunArtifact(svm_exe);
-    svm_exe_run.addArgs(b.args orelse &.{});
-    if (!no_run) svm_step.dependOn(&svm_exe_run.step);
-    if (no_run) svm_step.dependOn(&b.addInstallArtifact(svm_exe, .{}).step);
+    if (!no_bin) {
+        const svm_install = b.addInstallArtifact(svm_exe, .{});
+        svm_step.dependOn(&svm_install.step);
+        install_step.dependOn(&svm_install.step);
+    }
+
+    if (!no_run) {
+        const svm_run = b.addRunArtifact(svm_exe);
+        svm_step.dependOn(&svm_run.step);
+        svm_run.addArgs(b.args orelse &.{});
+    }
+
+    // docs for the Sig library
+    const install_sig_docs = b.addInstallDirectory(.{
+        .source_dir = sig_exe.getEmittedDocs(),
+        .install_dir = .prefix,
+        .install_subdir = "docs",
+    });
+    docs_step.dependOn(&install_sig_docs.step);
 }
 
 const BlockstoreDB = enum {
diff --git a/build.zig.zon b/build.zig.zon
index 75d34b2eb..eb3f6ed2f 100644
--- a/build.zig.zon
+++ b/build.zig.zon
@@ -32,8 +32,8 @@
         .hash = "1220d25e3ef18526cb6c980b441f05dc96330bedd88a512dcb3aed775a981ce3707d",
     },
     .lsquic = .{
-        .url = "https://github.com/Syndica/lsquic/archive/7c34f2472f390482dffada7a65b8231f6b5b3a3b.tar.gz",
-        .hash = "1220ff572d14b18d9774a298db0b8aeb046800039ccc8e10646992b074e26ea4c38a",
+        .url = "https://github.com/Syndica/lsquic/archive/ed6ced0cbc6447f7135a32db491e398debdf8af7.tar.gz",
+        .hash = "12207247a06ac1e7f4ddd6f0fbca4bcdcdf78655432a07928a96c8ec456bdfca71e3",
     },
     .xev = .{
         .url = "https://github.com/mitchellh/libxev/archive/b8d1d93e5c899b27abbaa7df23b496c3e6a178c7.tar.gz",
diff --git a/scripts/kcov_test.sh b/scripts/kcov_test.sh
index 7778a1bd6..04a8fe0ee 100755
--- a/scripts/kcov_test.sh
+++ b/scripts/kcov_test.sh
@@ -13,21 +13,26 @@
 # export PATH=$PATH:/path/to/kcov/build/src
 # ```
 
-set -euxo pipefail
+set -exo pipefail
 
 echo "=> Cleaning up"
 rm -rf kcov-output
 mkdir kcov-output
 
-echo "=> Building Sig"
-zig build
+if [ -z "$1" ]; then
+    echo "=> Building Sig"
+    zig build
+    test_bin="./zig-out/bin/test"
+else
+    test_bin="$1"
+fi
 
 echo "=> Running kcov on tests"
 kcov \
     --include-pattern=src/ \
     --exclude-pattern=$HOME/.cache \
     kcov-output \
-    ./zig-out/bin/test
+    $test_bin
 
 echo "=> Opening kcov-output/index.html"
 open kcov-output/index.html || echo "=> Failed to open kcov-output/index.html"
diff --git a/src/accountsdb/download.zig b/src/accountsdb/download.zig
index 24a848449..4838836b2 100644
--- a/src/accountsdb/download.zig
+++ b/src/accountsdb/download.zig
@@ -179,6 +179,7 @@ pub fn downloadSnapshotsFromGossip(
     output_dir: std.fs.Dir,
     min_mb_per_sec: usize,
     max_number_of_download_attempts: u64,
+    timeout: ?sig.time.Duration,
 ) !struct { std.fs.File, ?std.fs.File } {
     const logger = logger_.withScope(LOG_SCOPE);
     logger
@@ -196,6 +197,7 @@ pub fn downloadSnapshotsFromGossip(
     var slow_peer_pubkeys = std.ArrayList(Pubkey).init(allocator);
     defer slow_peer_pubkeys.deinit();
 
+    var function_duration = try std.time.Timer.start();
     var download_attempts: u64 = 0;
     while (true) {
         std.time.sleep(5 * std.time.ns_per_s); // wait while gossip table updates
@@ -205,6 +207,13 @@ pub fn downloadSnapshotsFromGossip(
             return error.UnableToDownloadSnapshot;
         }
 
+        if (timeout) |t| {
+            if (function_duration.read() > t.asNanos()) {
+                logger.err().logf("exceeded download timeout: {any}", .{t});
+                return error.UnableToDownloadSnapshot;
+            }
+        }
+
         // only hold gossip table lock for this block
         {
             const gossip_table, var gossip_table_lg = gossip_service.gossip_table_rw.readWithLock();
@@ -449,6 +458,7 @@ pub fn getOrDownloadAndUnpackSnapshot(
         min_snapshot_download_speed_mbs: usize = 20,
         trusted_validators: ?[]const Pubkey = null,
         max_number_of_download_attempts: u64 = default_adb_config.max_number_of_snapshot_download_attempts,
+        download_timeout: ?sig.time.Duration = null,
     },
 ) !struct { FullAndIncrementalManifest, SnapshotFiles } {
     const logger = logger_.withScope(LOG_SCOPE);
@@ -504,6 +514,7 @@ pub fn getOrDownloadAndUnpackSnapshot(
         snapshot_dir,
         @intCast(min_mb_per_sec),
         options.max_number_of_download_attempts,
+        options.download_timeout,
     );
     defer full.close();
     defer if (maybe_inc) |inc| inc.close();
diff --git a/src/benchmarks.zig b/src/benchmarks.zig
index b18e989e4..3c72d8c7a 100644
--- a/src/benchmarks.zig
+++ b/src/benchmarks.zig
@@ -247,6 +247,7 @@ pub fn main() !void {
             .force_new_snapshot_download = true,
             .max_number_of_download_attempts = 50,
             .min_snapshot_download_speed_mbs = 10,
+            .download_timeout = Duration.fromMinutes(5),
         },
     ) catch |err| {
         switch (err) {
diff --git a/src/cmd/cmd.zig b/src/cmd/cmd.zig
index 78b6a23bf..2f9fdd12e 100644
--- a/src/cmd/cmd.zig
+++ b/src/cmd/cmd.zig
@@ -64,6 +64,14 @@ pub fn run() !void {
         // _ = gossip_value_gpa.deinit(); // Commented out for no leeks
     }
 
+    var shred_version_option = cli.Option{
+        .long_name = "shred-version",
+        .help = "The shred version for the network",
+        .value_ref = cli.mkRef(&config.current.shred_version),
+        .required = false,
+        .value_name = "Shred Version",
+    };
+
     var gossip_host_option = cli.Option{
         .long_name = "gossip-host",
         .help =
@@ -417,6 +425,7 @@ pub fn run() !void {
                 &gossip_spy_node_option,
                 &gossip_dump_option,
                 &network_option,
+                &shred_version_option,
             },
             .target = .{
                 .action = .{
@@ -434,6 +443,7 @@ pub fn run() !void {
             ,
             },
             .options = &.{
+                &shred_version_option,
                 // gossip
                 &gossip_host_option,
                 &gossip_port_option,
@@ -492,6 +502,7 @@ pub fn run() !void {
                 \\ for testnet or another `-u` for mainnet/devnet.
             },
             .options = &.{
+                &shred_version_option,
                 // gossip
                 &gossip_host_option,
                 &gossip_port_option,
@@ -529,6 +540,7 @@ pub fn run() !void {
             ,
             },
             .options = &.{
+                &shred_version_option,
                 // where to download the snapshot
                 &snapshot_dir_option,
                 // download options
@@ -626,6 +638,7 @@ pub fn run() !void {
             ,
             },
             .options = &.{
+                &shred_version_option,
                 // gossip
                 &gossip_host_option,
                 &gossip_port_option,
@@ -665,6 +678,7 @@ pub fn run() !void {
             ,
             },
             .options = &.{
+                &shred_version_option,
                 // gossip
                 &network_option,
                 &gossip_host_option,
@@ -1266,7 +1280,12 @@ const AppBase = struct {
             allocator,
             entrypoints,
         );
-        const my_shred_version = echo_data.shred_version orelse 0;
+
+        // zig fmt: off
+        const my_shred_version = config.current.shred_version
+            orelse echo_data.shred_version
+            orelse 0;
+        // zig fmt: on
 
         const config_host = config.current.gossip.getHost() catch null;
         const my_ip = config_host orelse echo_data.ip orelse IpAddr.newIpv4(127, 0, 0, 1);
@@ -1552,6 +1571,7 @@ fn downloadSnapshot() !void {
         snapshot_dir,
         @intCast(min_mb_per_sec),
         config.current.accounts_db.max_number_of_snapshot_download_attempts,
+        null,
     );
     defer full_file.close();
     defer if (maybe_inc_file) |inc_file| inc_file.close();
diff --git a/src/cmd/config.zig b/src/cmd/config.zig
index 19f4a30bf..0481294a8 100644
--- a/src/cmd/config.zig
+++ b/src/cmd/config.zig
@@ -28,6 +28,7 @@ pub const Config = struct {
     // general config
     log_level: LogLevel = .debug,
     metrics_port: u16 = 12345,
+    shred_version: ?u16 = null,
 
     pub fn genesisFilePath(self: Config) error{UnknownCluster}!?[]const u8 {
         return if (self.genesis_file_path) |provided_path|
diff --git a/src/ledger/shred_inserter/shred_inserter.zig b/src/ledger/shred_inserter/shred_inserter.zig
index d4a1cc0b8..8815ad9dd 100644
--- a/src/ledger/shred_inserter/shred_inserter.zig
+++ b/src/ledger/shred_inserter/shred_inserter.zig
@@ -206,12 +206,13 @@ pub const ShredInserter = struct {
             switch (shred) {
                 .data => |data_shred| {
                     if (options.shred_tracker) |tracker| {
-                        tracker.registerDataShred(&shred.data, milli_timestamp) catch |err|
+                        tracker.registerDataShred(&shred.data, milli_timestamp) catch |err| {
                             switch (err) {
-                            error.SlotUnderflow, error.SlotOverflow => {
-                                self.metrics.register_shred_error.observe(@errorCast(err));
-                            },
-                            else => return err,
+                                error.SlotUnderflow, error.SlotOverflow => {
+                                    self.metrics.register_shred_error.observe(@errorCast(err));
+                                },
+                                else => return err,
+                            }
                         };
                     }
                     if (self.checkInsertDataShred(
@@ -300,12 +301,13 @@ pub const ShredInserter = struct {
                 continue;
             }
             if (options.shred_tracker) |tracker| {
-                tracker.registerDataShred(&shred.data, milli_timestamp) catch |err|
+                tracker.registerDataShred(&shred.data, milli_timestamp) catch |err| {
                     switch (err) {
-                    error.SlotUnderflow, error.SlotOverflow => {
-                        self.metrics.register_shred_error.observe(@errorCast(err));
-                    },
-                    else => return err,
+                        error.SlotUnderflow, error.SlotOverflow => {
+                            self.metrics.register_shred_error.observe(@errorCast(err));
+                        },
+                        else => return err,
+                    }
                 };
             }
             if (self.checkInsertDataShred(
diff --git a/src/shred_network/shred_tracker.zig b/src/shred_network/shred_tracker.zig
index 86867dd10..50506959d 100644
--- a/src/shred_network/shred_tracker.zig
+++ b/src/shred_network/shred_tracker.zig
@@ -267,6 +267,7 @@ const bit_set = struct {
     fn maskBit(index: usize) ShredSet.MaskInt {
         return @as(ShredSet.MaskInt, 1) << @as(ShredSet.ShiftInt, @truncate(index));
     }
+
     fn maskIndex(index: usize) usize {
         return index >> @bitSizeOf(ShredSet.ShiftInt);
     }
diff --git a/src/time/time.zig b/src/time/time.zig
index 4a89c46db..4594ee266 100644
--- a/src/time/time.zig
+++ b/src/time/time.zig
@@ -499,6 +499,10 @@ pub const Duration = struct {
         return .{ .ns = 0 };
     }
 
+    pub fn fromMinutes(m: u64) Duration {
+        return .{ .ns = m * std.time.ns_per_min };
+    }
+
     pub fn fromSecs(s: u64) Duration {
         return .{ .ns = s * std.time.ns_per_s };
     }