diff --git a/.github/workflows-source/ci-main.yml b/.github/workflows-source/ci-main.yml index 4cd27322afa..e0634803029 100644 --- a/.github/workflows-source/ci-main.yml +++ b/.github/workflows-source/ci-main.yml @@ -94,7 +94,7 @@ jobs: name: Bazel Test All <<: *dind-large-setup runs-on: - group: ln1 + group: zh1 labels: dind-large env: CLOUD_CREDENTIALS_CONTENT: ${{ secrets.CLOUD_CREDENTIALS_CONTENT }} diff --git a/.github/workflows/ci-main.yml b/.github/workflows/ci-main.yml index 2397ae12455..fd33de166dd 100644 --- a/.github/workflows/ci-main.yml +++ b/.github/workflows/ci-main.yml @@ -35,7 +35,7 @@ jobs: -e NODE_NAME --privileged --cgroupns host -v /cache:/cache -v /var/sysimage:/var/sysimage -v /var/tmp:/var/tmp -v /ceph-s3-info:/ceph-s3-info timeout-minutes: 90 runs-on: - group: ln1 + group: zh1 labels: dind-large env: CLOUD_CREDENTIALS_CONTENT: ${{ secrets.CLOUD_CREDENTIALS_CONTENT }} diff --git a/Cargo.Bazel.Fuzzing.json.lock b/Cargo.Bazel.Fuzzing.json.lock index 39eacb99a29..8fb4c7dadd3 100644 --- a/Cargo.Bazel.Fuzzing.json.lock +++ b/Cargo.Bazel.Fuzzing.json.lock @@ -1,5 +1,5 @@ { - "checksum": "d2f87afb142226b1d1eb89350d6c0bd83a38fc74283613f613b6058561be115f", + "checksum": "4ae7b3c97aa8a5afd939403aaa5919e419cc497d77a0fef55b643a99ef0c998b", "crates": { "abnf 0.12.0": { "name": "abnf", @@ -10567,14 +10567,14 @@ ], "license_file": "LICENSE-APACHE" }, - "canbench 0.1.8": { + "canbench 0.1.9": { "name": "canbench", - "version": "0.1.8", + "version": "0.1.9", "package_url": "https://github.com/dfinity/canbench", "repository": { "Http": { - "url": "https://static.crates.io/crates/canbench/0.1.8/download", - "sha256": "cb548f9e006ad29b160d37e07435c499af7d2741918e18d95ddc87dfe97a0b8d" + "url": "https://static.crates.io/crates/canbench/0.1.9/download", + "sha256": "c3eba78c84aa78e67f27c5c5a7d6c3d33cc301f95886171748e8f6ed8a31b258" } }, "targets": [ @@ -10611,7 +10611,7 @@ "deps": { "common": [ { - "id": "canbench-rs 0.1.8", + "id": "canbench-rs 0.1.9", "target": 
"canbench_rs" }, { @@ -10670,7 +10670,7 @@ "selects": {} }, "edition": "2021", - "version": "0.1.8" + "version": "0.1.9" }, "license": "Apache-2.0", "license_ids": [ @@ -10678,14 +10678,14 @@ ], "license_file": null }, - "canbench-rs 0.1.8": { + "canbench-rs 0.1.9": { "name": "canbench-rs", - "version": "0.1.8", + "version": "0.1.9", "package_url": "https://github.com/dfinity/canbench", "repository": { "Http": { - "url": "https://static.crates.io/crates/canbench-rs/0.1.8/download", - "sha256": "497d900e11ab1891dd9743dd45dbeaada540ce323aa1adc7fc0ce1da2c6e86ff" + "url": "https://static.crates.io/crates/canbench-rs/0.1.9/download", + "sha256": "588701e2d05679b79603acca6a6f8b78fe0d21f5f7a9c06fe689f769ae797008" } }, "targets": [ @@ -10728,13 +10728,13 @@ "proc_macro_deps": { "common": [ { - "id": "canbench-rs-macros 0.1.8", + "id": "canbench-rs-macros 0.1.9", "target": "canbench_rs_macros" } ], "selects": {} }, - "version": "0.1.8" + "version": "0.1.9" }, "license": "Apache-2.0", "license_ids": [ @@ -10742,14 +10742,14 @@ ], "license_file": null }, - "canbench-rs-macros 0.1.8": { + "canbench-rs-macros 0.1.9": { "name": "canbench-rs-macros", - "version": "0.1.8", + "version": "0.1.9", "package_url": "https://github.com/dfinity/canbench", "repository": { "Http": { - "url": "https://static.crates.io/crates/canbench-rs-macros/0.1.8/download", - "sha256": "5a5509bcfe6eeb86f057d46fbf20a2ba6b6bf9a1099b053a8f491cd7a909dfa6" + "url": "https://static.crates.io/crates/canbench-rs-macros/0.1.9/download", + "sha256": "e05fe21a7dfc85c3be8e40edbbcb3fe23b4c070fac4741eff18129f1d0f11aa9" } }, "targets": [ @@ -10789,7 +10789,7 @@ "selects": {} }, "edition": "2021", - "version": "0.1.8" + "version": "0.1.9" }, "license": "Apache-2.0", "license_ids": [ @@ -13415,7 +13415,7 @@ "Git": { "remote": "https://github.com/dfinity/cloudflare-rs.git", "commitish": { - "Rev": "a6538a036926bd756986c9c0a5de356daef48881" + "Rev": "0b1805bf11ed526445712559e6f18d3b8e024b06" }, "strip_prefix": "cloudflare" 
} @@ -19663,11 +19663,11 @@ "target": "cached" }, { - "id": "canbench 0.1.8", + "id": "canbench 0.1.9", "target": "canbench" }, { - "id": "canbench-rs 0.1.8", + "id": "canbench-rs 0.1.9", "target": "canbench_rs" }, { @@ -20147,6 +20147,10 @@ "id": "maxminddb 0.24.0", "target": "maxminddb" }, + { + "id": "memmap2 0.9.5", + "target": "memmap2" + }, { "id": "metrics-proxy 0.1.0", "target": "metrics_proxy" @@ -43410,6 +43414,56 @@ ], "license_file": "LICENSE-APACHE" }, + "memmap2 0.9.5": { + "name": "memmap2", + "version": "0.9.5", + "package_url": "https://github.com/RazrFalcon/memmap2-rs", + "repository": { + "Http": { + "url": "https://static.crates.io/crates/memmap2/0.9.5/download", + "sha256": "fd3f7eed9d3848f8b98834af67102b720745c4ec028fcd0aa0239277e7de374f" + } + }, + "targets": [ + { + "Library": { + "crate_name": "memmap2", + "crate_root": "src/lib.rs", + "srcs": { + "allow_empty": true, + "include": [ + "**/*.rs" + ] + } + } + } + ], + "library_target_name": "memmap2", + "common_attrs": { + "compile_data_glob": [ + "**" + ], + "deps": { + "common": [], + "selects": { + "cfg(unix)": [ + { + "id": "libc 0.2.158", + "target": "libc" + } + ] + } + }, + "edition": "2018", + "version": "0.9.5" + }, + "license": "MIT OR Apache-2.0", + "license_ids": [ + "Apache-2.0", + "MIT" + ], + "license_file": "LICENSE-APACHE" + }, "memoffset 0.6.5": { "name": "memoffset", "version": "0.6.5", @@ -90188,7 +90242,7 @@ } }, "binary_crates": [ - "canbench 0.1.8", + "canbench 0.1.9", "ic-wasm 0.8.4", "metrics-proxy 0.1.0" ], @@ -91296,8 +91350,8 @@ "byteorder 1.5.0", "bytes 1.9.0", "cached 0.49.2", - "canbench 0.1.8", - "canbench-rs 0.1.8", + "canbench 0.1.9", + "canbench-rs 0.1.9", "candid 0.10.13", "candid_parser 0.1.2", "cargo_metadata 0.14.2", @@ -91421,6 +91475,7 @@ "mach2 0.4.2", "maplit 1.0.2", "maxminddb 0.24.0", + "memmap2 0.9.5", "metrics-proxy 0.1.0", "minicbor 0.19.1", "minicbor-derive 0.13.0", diff --git a/Cargo.Bazel.Fuzzing.toml.lock b/Cargo.Bazel.Fuzzing.toml.lock 
index fff3d677827..bcdc0c24d2f 100644 --- a/Cargo.Bazel.Fuzzing.toml.lock +++ b/Cargo.Bazel.Fuzzing.toml.lock @@ -1784,9 +1784,9 @@ dependencies = [ [[package]] name = "canbench" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb548f9e006ad29b160d37e07435c499af7d2741918e18d95ddc87dfe97a0b8d" +checksum = "c3eba78c84aa78e67f27c5c5a7d6c3d33cc301f95886171748e8f6ed8a31b258" dependencies = [ "canbench-rs", "candid", @@ -1806,9 +1806,9 @@ dependencies = [ [[package]] name = "canbench-rs" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497d900e11ab1891dd9743dd45dbeaada540ce323aa1adc7fc0ce1da2c6e86ff" +checksum = "588701e2d05679b79603acca6a6f8b78fe0d21f5f7a9c06fe689f769ae797008" dependencies = [ "canbench-rs-macros", "candid", @@ -1818,9 +1818,9 @@ dependencies = [ [[package]] name = "canbench-rs-macros" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5509bcfe6eeb86f057d46fbf20a2ba6b6bf9a1099b053a8f491cd7a909dfa6" +checksum = "e05fe21a7dfc85c3be8e40edbbcb3fe23b4c070fac4741eff18129f1d0f11aa9" dependencies = [ "proc-macro2", "quote", @@ -2182,7 +2182,7 @@ dependencies = [ [[package]] name = "cloudflare" version = "0.12.0" -source = "git+https://github.com/dfinity/cloudflare-rs.git?rev=a6538a036926bd756986c9c0a5de356daef48881#a6538a036926bd756986c9c0a5de356daef48881" +source = "git+https://github.com/dfinity/cloudflare-rs.git?rev=0b1805bf11ed526445712559e6f18d3b8e024b06#0b1805bf11ed526445712559e6f18d3b8e024b06" dependencies = [ "chrono", "http 0.2.12", @@ -3233,7 +3233,7 @@ dependencies = [ "ciborium", "cidr", "clap 4.5.20", - "cloudflare 0.12.0 (git+https://github.com/dfinity/cloudflare-rs.git?rev=a6538a036926bd756986c9c0a5de356daef48881)", + "cloudflare 0.12.0 (git+https://github.com/dfinity/cloudflare-rs.git?rev=0b1805bf11ed526445712559e6f18d3b8e024b06)", "colored", 
"comparable", "console 0.11.3", @@ -3345,6 +3345,7 @@ dependencies = [ "mach2", "maplit", "maxminddb", + "memmap2 0.9.5", "metrics-proxy", "minicbor", "minicbor-derive", @@ -7185,6 +7186,15 @@ dependencies = [ "libc", ] +[[package]] +name = "memmap2" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd3f7eed9d3848f8b98834af67102b720745c4ec028fcd0aa0239277e7de374f" +dependencies = [ + "libc", +] + [[package]] name = "memoffset" version = "0.6.5" @@ -11454,7 +11464,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fac08504d60cf5bdffeb8a6a028f1a4868a5da1098bb19eb46239440039163fb" dependencies = [ "debugid", - "memmap2", + "memmap2 0.5.10", "stable_deref_trait", "uuid", ] diff --git a/Cargo.Bazel.json.lock b/Cargo.Bazel.json.lock index 66ff9bc0502..8fe5337bd2b 100644 --- a/Cargo.Bazel.json.lock +++ b/Cargo.Bazel.json.lock @@ -1,5 +1,5 @@ { - "checksum": "bfc8ee26e96128dcc97b852714a8aba08b1b808bc98b6f33406e6d73098eb224", + "checksum": "ac36051bb3fd1c488e3c6fdbdfc95fff29f3760abf119cb1fa2e1d829f1251f8", "crates": { "abnf 0.12.0": { "name": "abnf", @@ -10484,14 +10484,14 @@ ], "license_file": "LICENSE-APACHE" }, - "canbench 0.1.8": { + "canbench 0.1.9": { "name": "canbench", - "version": "0.1.8", + "version": "0.1.9", "package_url": "https://github.com/dfinity/canbench", "repository": { "Http": { - "url": "https://static.crates.io/crates/canbench/0.1.8/download", - "sha256": "cb548f9e006ad29b160d37e07435c499af7d2741918e18d95ddc87dfe97a0b8d" + "url": "https://static.crates.io/crates/canbench/0.1.9/download", + "sha256": "c3eba78c84aa78e67f27c5c5a7d6c3d33cc301f95886171748e8f6ed8a31b258" } }, "targets": [ @@ -10528,7 +10528,7 @@ "deps": { "common": [ { - "id": "canbench-rs 0.1.8", + "id": "canbench-rs 0.1.9", "target": "canbench_rs" }, { @@ -10587,7 +10587,7 @@ "selects": {} }, "edition": "2021", - "version": "0.1.8" + "version": "0.1.9" }, "license": "Apache-2.0", "license_ids": [ @@ -10595,14 
+10595,14 @@ ], "license_file": null }, - "canbench-rs 0.1.8": { + "canbench-rs 0.1.9": { "name": "canbench-rs", - "version": "0.1.8", + "version": "0.1.9", "package_url": "https://github.com/dfinity/canbench", "repository": { "Http": { - "url": "https://static.crates.io/crates/canbench-rs/0.1.8/download", - "sha256": "497d900e11ab1891dd9743dd45dbeaada540ce323aa1adc7fc0ce1da2c6e86ff" + "url": "https://static.crates.io/crates/canbench-rs/0.1.9/download", + "sha256": "588701e2d05679b79603acca6a6f8b78fe0d21f5f7a9c06fe689f769ae797008" } }, "targets": [ @@ -10645,13 +10645,13 @@ "proc_macro_deps": { "common": [ { - "id": "canbench-rs-macros 0.1.8", + "id": "canbench-rs-macros 0.1.9", "target": "canbench_rs_macros" } ], "selects": {} }, - "version": "0.1.8" + "version": "0.1.9" }, "license": "Apache-2.0", "license_ids": [ @@ -10659,14 +10659,14 @@ ], "license_file": null }, - "canbench-rs-macros 0.1.8": { + "canbench-rs-macros 0.1.9": { "name": "canbench-rs-macros", - "version": "0.1.8", + "version": "0.1.9", "package_url": "https://github.com/dfinity/canbench", "repository": { "Http": { - "url": "https://static.crates.io/crates/canbench-rs-macros/0.1.8/download", - "sha256": "5a5509bcfe6eeb86f057d46fbf20a2ba6b6bf9a1099b053a8f491cd7a909dfa6" + "url": "https://static.crates.io/crates/canbench-rs-macros/0.1.9/download", + "sha256": "e05fe21a7dfc85c3be8e40edbbcb3fe23b4c070fac4741eff18129f1d0f11aa9" } }, "targets": [ @@ -10706,7 +10706,7 @@ "selects": {} }, "edition": "2021", - "version": "0.1.8" + "version": "0.1.9" }, "license": "Apache-2.0", "license_ids": [ @@ -19491,11 +19491,11 @@ "target": "cached" }, { - "id": "canbench 0.1.8", + "id": "canbench 0.1.9", "target": "canbench" }, { - "id": "canbench-rs 0.1.8", + "id": "canbench-rs 0.1.9", "target": "canbench_rs" }, { @@ -19975,6 +19975,10 @@ "id": "maxminddb 0.24.0", "target": "maxminddb" }, + { + "id": "memmap2 0.9.5", + "target": "memmap2" + }, { "id": "metrics-proxy 0.1.0", "target": "metrics_proxy" @@ -43250,6 
+43254,56 @@ ], "license_file": "LICENSE-APACHE" }, + "memmap2 0.9.5": { + "name": "memmap2", + "version": "0.9.5", + "package_url": "https://github.com/RazrFalcon/memmap2-rs", + "repository": { + "Http": { + "url": "https://static.crates.io/crates/memmap2/0.9.5/download", + "sha256": "fd3f7eed9d3848f8b98834af67102b720745c4ec028fcd0aa0239277e7de374f" + } + }, + "targets": [ + { + "Library": { + "crate_name": "memmap2", + "crate_root": "src/lib.rs", + "srcs": { + "allow_empty": true, + "include": [ + "**/*.rs" + ] + } + } + } + ], + "library_target_name": "memmap2", + "common_attrs": { + "compile_data_glob": [ + "**" + ], + "deps": { + "common": [], + "selects": { + "cfg(unix)": [ + { + "id": "libc 0.2.158", + "target": "libc" + } + ] + } + }, + "edition": "2018", + "version": "0.9.5" + }, + "license": "MIT OR Apache-2.0", + "license_ids": [ + "Apache-2.0", + "MIT" + ], + "license_file": "LICENSE-APACHE" + }, "memoffset 0.6.5": { "name": "memoffset", "version": "0.6.5", @@ -90179,7 +90233,7 @@ } }, "binary_crates": [ - "canbench 0.1.8", + "canbench 0.1.9", "ic-wasm 0.8.4", "metrics-proxy 0.1.0" ], @@ -91209,8 +91263,8 @@ "byteorder 1.5.0", "bytes 1.9.0", "cached 0.49.2", - "canbench 0.1.8", - "canbench-rs 0.1.8", + "canbench 0.1.9", + "canbench-rs 0.1.9", "candid 0.10.13", "candid_parser 0.1.2", "cargo_metadata 0.14.2", @@ -91334,6 +91388,7 @@ "mach2 0.4.2", "maplit 1.0.2", "maxminddb 0.24.0", + "memmap2 0.9.5", "metrics-proxy 0.1.0", "minicbor 0.19.1", "minicbor-derive 0.13.0", diff --git a/Cargo.Bazel.toml.lock b/Cargo.Bazel.toml.lock index 37a2cf528fd..639e44ad010 100644 --- a/Cargo.Bazel.toml.lock +++ b/Cargo.Bazel.toml.lock @@ -1785,9 +1785,9 @@ dependencies = [ [[package]] name = "canbench" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb548f9e006ad29b160d37e07435c499af7d2741918e18d95ddc87dfe97a0b8d" +checksum = "c3eba78c84aa78e67f27c5c5a7d6c3d33cc301f95886171748e8f6ed8a31b258" dependencies 
= [ "canbench-rs", "candid", @@ -1807,9 +1807,9 @@ dependencies = [ [[package]] name = "canbench-rs" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497d900e11ab1891dd9743dd45dbeaada540ce323aa1adc7fc0ce1da2c6e86ff" +checksum = "588701e2d05679b79603acca6a6f8b78fe0d21f5f7a9c06fe689f769ae797008" dependencies = [ "canbench-rs-macros", "candid", @@ -1819,9 +1819,9 @@ dependencies = [ [[package]] name = "canbench-rs-macros" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5509bcfe6eeb86f057d46fbf20a2ba6b6bf9a1099b053a8f491cd7a909dfa6" +checksum = "e05fe21a7dfc85c3be8e40edbbcb3fe23b4c070fac4741eff18129f1d0f11aa9" dependencies = [ "proc-macro2", "quote", @@ -3334,6 +3334,7 @@ dependencies = [ "mach2", "maplit", "maxminddb", + "memmap2 0.9.5", "metrics-proxy", "minicbor", "minicbor-derive", @@ -7176,6 +7177,15 @@ dependencies = [ "libc", ] +[[package]] +name = "memmap2" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd3f7eed9d3848f8b98834af67102b720745c4ec028fcd0aa0239277e7de374f" +dependencies = [ + "libc", +] + [[package]] name = "memoffset" version = "0.6.5" @@ -11450,7 +11460,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e0e9bc48b3852f36a84f8d0da275d50cb3c2b88b59b9ec35fdd8b7fa239e37d" dependencies = [ "debugid", - "memmap2", + "memmap2 0.5.10", "stable_deref_trait", "uuid", ] diff --git a/Cargo.lock b/Cargo.lock index bec2f9f410a..cdb26700e63 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -16006,8 +16006,10 @@ dependencies = [ "ic-types", "lazy_static", "libc", + "memmap2", "nix 0.24.3", "proptest 1.6.0", + "rayon", "slog", "tempfile", ] diff --git a/Cargo.toml b/Cargo.toml index 6a3d156ddfb..ab2a603df7d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -607,6 +607,7 @@ libflate = "2.1.0" libnss = "0.5.0" local-ip-address = "0.5.6" macaddr = "1.0" +memmap2 = 
"0.9.5" minicbor = { version = "0.19.1", features = ["alloc", "derive"] } minicbor-derive = "0.13.0" mockall = "0.13.0" diff --git a/bazel/canbench.bzl b/bazel/canbench.bzl index 6a570c8e666..f07def08aca 100644 --- a/bazel/canbench.bzl +++ b/bazel/canbench.bzl @@ -5,7 +5,7 @@ This module defines functions to run benchmarks using canbench. load("@rules_rust//rust:defs.bzl", "rust_binary") load("//bazel:canisters.bzl", "wasm_rust_binary_rule") -def rust_canbench(name, results_file, add_test = False, opt = "3", **kwargs): +def rust_canbench(name, results_file, add_test = False, opt = "3", noise_threshold = None, **kwargs): """ Run a Rust benchmark using canbench. This creates 2 executable rules: :${name} for running the benchmark and :${name}_update for @@ -17,6 +17,8 @@ def rust_canbench(name, results_file, add_test = False, opt = "3", **kwargs): add_test: If True add an additional :${name}_test rule that fails if canbench benchmark fails. opt: The optimization level to use for the rust_binary compilation. **kwargs: Additional arguments to pass to rust_binary. + noise_threshold: The noise threshold to use for the benchmark. If None, the default value from + canbench is used. """ rust_binary( @@ -50,6 +52,9 @@ def rust_canbench(name, results_file, add_test = False, opt = "3", **kwargs): "WORKSPACE": "$(rootpath //:WORKSPACE.bazel)", } + if noise_threshold: + env["NOISE_THRESHOLD"] = str(noise_threshold) + native.sh_binary( name = name, testonly = True, diff --git a/bazel/canbench.sh b/bazel/canbench.sh index f47c6216275..3dddd90528f 100755 --- a/bazel/canbench.sh +++ b/bazel/canbench.sh @@ -9,6 +9,8 @@ # - updated if --update is specified. # - used for comparison if it's not empty. # - WASM_PATH: Path to the wasm file to be benchmarked. +# - NOISE_THRESHOLD: The noise threshold in percentage. If the difference between the current +# benchmark and the results file is above this threshold, the benchmark test will fail. 
set -eEuo pipefail @@ -28,8 +30,6 @@ if [ -s "${REPO_RESULTS_PATH}" ]; then echo " ${REPO_RESULTS_PATH}" >>${CANBENCH_YML} fi -echo ${RUNFILES} - if [ $# -eq 0 ]; then # Runs the benchmark without updating the results file. ${CANBENCH_BIN} --no-runtime-integrity-check --runtime-path ${POCKET_IC_BIN} @@ -43,8 +43,9 @@ elif [ "$1" = "--update" ]; then cp "${RUNFILES}/canbench_results.yml" "${REPO_RESULTS_PATH}" fi else + NOISE_THRESHOLD_ARG="${NOISE_THRESHOLD:+--noise-threshold ${NOISE_THRESHOLD}}" # Runs the benchmark test that fails if the diffs are new or above the threshold. - ${CANBENCH_BIN} --no-runtime-integrity-check --runtime-path ${POCKET_IC_BIN} >$CANBENCH_OUTPUT + ${CANBENCH_BIN} --no-runtime-integrity-check --runtime-path ${POCKET_IC_BIN} ${NOISE_THRESHOLD_ARG} >$CANBENCH_OUTPUT if grep -q "(regress\|(improved by \|(new)" "$CANBENCH_OUTPUT"; then cat "$CANBENCH_OUTPUT" echo "**\`$REPO_RESULTS_PATH\` is not up to date ❌** diff --git a/bazel/external_crates.bzl b/bazel/external_crates.bzl index fcba520a3c5..f41b12bfc9f 100644 --- a/bazel/external_crates.bzl +++ b/bazel/external_crates.bzl @@ -332,10 +332,10 @@ def external_crates_repository(name, cargo_lockfile, lockfile, sanitizers_enable default_features = False, ), "canbench": crate.spec( - version = "^0.1.8", + version = "^0.1.9", ), "canbench-rs": crate.spec( - version = "^0.1.8", + version = "^0.1.9", ), "candid": crate.spec( version = "^0.10.13", @@ -779,6 +779,9 @@ def external_crates_repository(name, cargo_lockfile, lockfile, sanitizers_enable "macaddr": crate.spec( version = "^1.0", ), + "memmap2": crate.spec( + version = "^0.9.5", + ), "mach2": crate.spec( # Wasmtime depends on 0.4.2 but specifies 0.4.1. 
# Enforce 0.4.2 using a dummy dependency until diff --git a/rs/bitcoin/ckbtc/minter/test_resources/mainnet_events.gz b/rs/bitcoin/ckbtc/minter/test_resources/mainnet_events.gz index 074834bdb27..8c1a4ea3d4a 100644 Binary files a/rs/bitcoin/ckbtc/minter/test_resources/mainnet_events.gz and b/rs/bitcoin/ckbtc/minter/test_resources/mainnet_events.gz differ diff --git a/rs/bitcoin/ckbtc/minter/tests/replay_events.rs b/rs/bitcoin/ckbtc/minter/tests/replay_events.rs index 238e635218c..48f0688be4a 100644 --- a/rs/bitcoin/ckbtc/minter/tests/replay_events.rs +++ b/rs/bitcoin/ckbtc/minter/tests/replay_events.rs @@ -1,3 +1,10 @@ +//! To refresh the stored events on disk, call the tests as follows +//! ``` +//! bazel test --spawn_strategy=standalone //rs/bitcoin/ckbtc/minter:ckbtc_minter_replay_events_tests --test_env=RETRIEVE_MINTER_EVENTS=true --test_arg "should_replay_events_for_mainnet" --test_timeout 900 +//! ``` +//! The parameter `spawn_strategy=standalone` is needed, because the events will be fetched from the running canister and the default sandbox doesn't allow it. +//! The parameter `test_env=RETRIEVE_MINTER_EVENTS=true` is needed to enable the fetching of the events. 
+ use candid::{CandidType, Deserialize, Principal}; use ic_agent::Agent; use ic_ckbtc_minter::state::eventlog::{replay, Event, EventType}; @@ -18,7 +25,7 @@ fn assert_useless_events_is_empty(events: impl Iterator) { assert_eq!(count, 0); } -async fn should_migrate_events_for(file: GetEventsFile) -> CkBtcMinterState { +async fn should_migrate_events_for(file: impl GetEventsFile) -> CkBtcMinterState { use ic_ckbtc_minter::storage::{decode_event, encode_event, migrate_events}; use ic_stable_structures::{ log::Log as StableLog, @@ -53,44 +60,38 @@ async fn should_migrate_events_for(file: GetEventsFile) -> CkBtcMinterState { #[tokio::test] async fn should_migrate_events_for_mainnet() { - let state = should_migrate_events_for(GetEventsFile::Mainnet).await; + let state = should_migrate_events_for(Mainnet).await; assert_eq!(state.btc_network, Network::Mainnet); - assert_eq!(state.get_total_btc_managed(), 21_723_786_340); + assert_eq!(state.get_total_btc_managed(), 20_209_150_152); } #[tokio::test] async fn should_migrate_events_for_testnet() { - let state = should_migrate_events_for(GetEventsFile::Testnet).await; + let state = should_migrate_events_for(Testnet).await; assert_eq!(state.btc_network, Network::Testnet); - assert_eq!(state.get_total_btc_managed(), 16578205978); + assert_eq!(state.get_total_btc_managed(), 16_578_205_978); } #[tokio::test] async fn should_replay_events_for_mainnet() { - GetEventsFile::Mainnet - .retrieve_and_store_events_if_env() - .await; + Mainnet.retrieve_and_store_events_if_env().await; - let state = - replay::(GetEventsFile::Mainnet.deserialize().events.into_iter()) - .expect("Failed to replay events"); + let state = replay::(Mainnet.deserialize().events.into_iter()) + .expect("Failed to replay events"); state .check_invariants() .expect("Failed to check invariants"); assert_eq!(state.btc_network, Network::Mainnet); - assert_eq!(state.get_total_btc_managed(), 21_723_786_340); + assert_eq!(state.get_total_btc_managed(), 20_209_150_152); } 
#[tokio::test] async fn should_replay_events_for_testnet() { - GetEventsFile::Testnet - .retrieve_and_store_events_if_env() - .await; + Testnet.retrieve_and_store_events_if_env().await; - let state = - replay::(GetEventsFile::Testnet.deserialize().events.into_iter()) - .expect("Failed to replay events"); + let state = replay::(Testnet.deserialize().events.into_iter()) + .expect("Failed to replay events"); state .check_invariants() .expect("Failed to check invariants"); @@ -106,19 +107,21 @@ async fn should_replay_events_for_testnet() { #[test] #[ignore] fn should_replay_events_and_check_invariants() { - for file in [GetEventsFile::Mainnet, GetEventsFile::Testnet] { + fn test(file: impl GetEventsFile + std::fmt::Debug) { let events = file.deserialize(); println!("Replaying {} {:?} events", events.total_event_count, file); let _state = replay::(events.events.into_iter()) .expect("Failed to replay events"); } + test(Mainnet); + test(Testnet); } // It's not clear why those events are here in the first place // but this test ensures that the number of such events doesn't grow. 
#[tokio::test] async fn should_not_grow_number_of_useless_events() { - for file in [GetEventsFile::Mainnet, GetEventsFile::Testnet] { + fn test(file: impl GetEventsFile) -> (u64, Vec) { let events = file.deserialize(); let received_utxo_to_minter_with_empty_utxos = EventType::ReceivedUtxos { mint_txid: None, @@ -128,21 +131,19 @@ async fn should_not_grow_number_of_useless_events() { let useless_events_indexes = assert_useless_events_eq(&events.events, &received_utxo_to_minter_with_empty_utxos); - - match file { - GetEventsFile::Mainnet => { - assert_eq!(events.total_event_count, 432_050); - assert_eq!(useless_events_indexes.len(), 409_141); - assert_eq!(useless_events_indexes.last(), Some(&411_301_usize)); - } - GetEventsFile::Testnet => { - assert_eq!(events.total_event_count, 46_815); - assert_eq!(useless_events_indexes.len(), 4_044); - assert_eq!(useless_events_indexes.last(), Some(&4_614_usize)); - } - } + (events.total_event_count, useless_events_indexes) } + let (total_event_count, useless_events_indexes) = test(Mainnet); + assert_eq!(total_event_count, 443_137); + assert_eq!(useless_events_indexes.len(), 409_141); + assert_eq!(useless_events_indexes.last(), Some(&411_301_usize)); + + let (total_event_count, useless_events_indexes) = test(Testnet); + assert_eq!(total_event_count, 46_815); + assert_eq!(useless_events_indexes.len(), 4_044); + assert_eq!(useless_events_indexes.last(), Some(&4_614_usize)); + fn assert_useless_events_eq( events: &[Event], expected_useless_event: &EventType, @@ -162,18 +163,21 @@ async fn should_not_grow_number_of_useless_events() { } #[derive(Debug)] -enum GetEventsFile { - Mainnet, - Testnet, -} +struct Mainnet; + +#[derive(Debug)] +struct Testnet; + +trait GetEventsFile { + // TODO (XC-261): + // These associated types are meant to deal with the the type difference in existing + // event logs between mainnet (with timestamps) and testnet (without timestamps) + // when we deserialize them for processing. 
This difference will go away once + // we re-deploy the testnet canister. These types (and the GetEventsFile trait) + // should be consolidated by then. + type EventType: CandidType + for<'a> Deserialize<'a> + Into; + type ResultType: CandidType + for<'a> Deserialize<'a> + Into; -impl GetEventsFile { - /// To refresh the stored events on disk, call the tests as follows - /// ``` - /// bazel test --spawn_strategy=standalone //rs/bitcoin/ckbtc/minter:ckbtc_minter_replay_events_tests --test_env=RETRIEVE_MINTER_EVENTS=true --test_arg "should_replay_events_for_mainnet" --test_timeout 900 - /// ``` - /// The parameter `spawn_strategy=standalone` is needed, because the events will be fetched from the running canister and the default sandbox doesn't allow it. - /// The parameter `test_env=RETRIEVE_MINTER_EVENTS=true` is needed to enable the fetching of the events. async fn retrieve_and_store_events_if_env(&self) { if std::env::var("RETRIEVE_MINTER_EVENTS").map(|s| s.parse().ok().unwrap_or_default()) == Ok(true) @@ -182,6 +186,31 @@ impl GetEventsFile { } } + async fn get_events( + &self, + agent: &Agent, + minter_id: &Principal, + start: u64, + length: u64, + ) -> Vec { + use candid::{Decode, Encode}; + use ic_ckbtc_minter::state::eventlog::GetEventsArg; + + let arg = GetEventsArg { start, length }; + + let raw_result = agent + .update(minter_id, "get_events") + .with_arg(Encode!(&arg).unwrap()) + .call_and_wait() + .await + .expect("Failed to call get_events"); + Decode!(&raw_result, Vec) + .unwrap() + .into_iter() + .map(|x| x.into()) + .collect() + } + async fn retrieve_and_store_events(&self) { use candid::Encode; use flate2::bufread::GzEncoder; @@ -199,13 +228,14 @@ impl GetEventsFile { const MAX_EVENTS_PER_QUERY: u64 = 2000; let mut events = Vec::new(); loop { - let fetched_events = get_events( - &agent, - &self.minter_canister_id(), - events.len() as u64, - MAX_EVENTS_PER_QUERY, - ) - .await; + let fetched_events = self + .get_events( + &agent, + 
&self.minter_canister_id(), + events.len() as u64, + MAX_EVENTS_PER_QUERY, + ) + .await; if fetched_events.is_empty() { break; } @@ -231,12 +261,7 @@ impl GetEventsFile { .expect("BUG: failed to write events"); } - fn minter_canister_id(&self) -> Principal { - match self { - GetEventsFile::Mainnet => Principal::from_text("mqygn-kiaaa-aaaar-qaadq-cai").unwrap(), - GetEventsFile::Testnet => Principal::from_text("ml52i-qqaaa-aaaar-qaaba-cai").unwrap(), - } - } + fn minter_canister_id(&self) -> Principal; fn path_to_events_file(&self) -> PathBuf { let mut path = PathBuf::from(std::env::var("CARGO_MANIFEST_DIR").unwrap()); @@ -244,12 +269,7 @@ impl GetEventsFile { path } - fn file_name(&self) -> &str { - match self { - GetEventsFile::Mainnet => "mainnet_events.gz", - GetEventsFile::Testnet => "testnet_events.gz", - } - } + fn file_name(&self) -> &str; fn deserialize(&self) -> GetEventsResult { use candid::Decode; @@ -262,42 +282,37 @@ impl GetEventsFile { let mut decompressed_buffer = Vec::new(); gz.read_to_end(&mut decompressed_buffer) .expect("BUG: failed to decompress events"); - // TODO XC-261 The logic here assumes the compressed events in the file still use the - // 'old' Candid interface (i.e. a vector of `EventTypes`). Once the deployed minter - // canister on mainnet/testnet return a result with the new interface, the explicit - // conversion from `EventType` to `Event` must be removed. 
- Decode!(&decompressed_buffer, GetEventTypesResult) + Decode!(&decompressed_buffer, Self::ResultType) .expect("Failed to decode events") .into() } } -async fn get_events(agent: &Agent, minter_id: &Principal, start: u64, length: u64) -> Vec { - use candid::{Decode, Encode}; - use ic_ckbtc_minter::state::eventlog::GetEventsArg; - - let arg = GetEventsArg { start, length }; - - let raw_result = agent - .update(minter_id, "get_events") - .with_arg(Encode!(&arg).unwrap()) - .call_and_wait() - .await - .expect("Failed to call get_events"); - // TODO XC-261 The logic here assumes the result we get from the minter canister `get_events` - // endpoint still uses the 'old' Candid interface (i.e. a vector of `EventTypes`). Once the - // deployed minter canisters on mainnet/testnet return a result with the new interface, the - // explicit conversion from `EventType` to `Event` must be removed. - Decode!(&raw_result, Vec) - .unwrap() - .into_iter() - .map(Event::from) - .collect() +impl GetEventsFile for Mainnet { + type EventType = Event; + type ResultType = GetEventsResult; + fn minter_canister_id(&self) -> Principal { + Principal::from_text("mqygn-kiaaa-aaaar-qaadq-cai").unwrap() + } + fn file_name(&self) -> &str { + "mainnet_events.gz" + } +} + +impl GetEventsFile for Testnet { + type EventType = EventType; + type ResultType = GetEventsWithoutTimestampsResult; + fn minter_canister_id(&self) -> Principal { + Principal::from_text("ml52i-qqaaa-aaaar-qaaba-cai").unwrap() + } + fn file_name(&self) -> &str { + "testnet_events.gz" + } } // TODO XC-261: Remove #[derive(Clone, Debug, CandidType, Deserialize)] -pub struct GetEventTypesResult { +pub struct GetEventsWithoutTimestampsResult { pub events: Vec, pub total_event_count: u64, } @@ -309,8 +324,8 @@ pub struct GetEventsResult { } // TODO XC-261: Remove -impl From for GetEventsResult { - fn from(value: GetEventTypesResult) -> Self { +impl From for GetEventsResult { + fn from(value: GetEventsWithoutTimestampsResult) -> Self { 
Self { events: value.events.into_iter().map(Event::from).collect(), total_event_count: value.total_event_count, diff --git a/rs/execution_environment/src/canister_manager.rs b/rs/execution_environment/src/canister_manager.rs index 42a3d14936f..37267eae3f9 100644 --- a/rs/execution_environment/src/canister_manager.rs +++ b/rs/execution_environment/src/canister_manager.rs @@ -60,7 +60,7 @@ use ic_types::{ use ic_wasm_transform::Module; use ic_wasm_types::{doc_ref, AsErrorHelp, CanisterModule, ErrorHelp, WasmHash}; use num_traits::cast::ToPrimitive; -use num_traits::SaturatingAdd; +use num_traits::{SaturatingAdd, SaturatingSub}; use prometheus::IntCounter; use serde::{Deserialize, Serialize}; use std::path::PathBuf; @@ -1790,6 +1790,94 @@ impl CanisterManager { Ok(StoredChunksReply(keys)) } + // Runs the following checks on memory usage and return an error + // if any fails: + // 1. Check new usage will not freeze canister + // 2. Check subnet has available memory + + // Additionally calculates if any cycles will need to be reserved. + // + // This is to be used when taking or loading a canister snapshot + // to ensure consistency in checks. + fn memory_usage_checks( + &self, + subnet_size: usize, + canister: &mut CanisterState, + round_limits: &RoundLimits, + new_memory_usage: NumBytes, + old_memory_usage: NumBytes, + resource_saturation: &ResourceSaturation, + ) -> Result<(), CanisterManagerError> { + let memory_increase = new_memory_usage.saturating_sub(&old_memory_usage); + + let reservation_cycles = self.cycles_account_manager.storage_reservation_cycles( + memory_increase, + resource_saturation, + subnet_size, + ); + + // Check that the canister does not exceed its freezing threshold based + // on the new memory usage. 
+ let threshold = self.cycles_account_manager.freeze_threshold_cycles( + canister.system_state.freeze_threshold, + canister.memory_allocation(), + new_memory_usage, + canister.message_memory_usage(), + canister.compute_allocation(), + subnet_size, + canister.system_state.reserved_balance(), + ); + + if canister.system_state.balance() < threshold + reservation_cycles { + return Err(CanisterManagerError::InsufficientCyclesInMemoryGrow { + bytes: memory_increase, + available: canister.system_state.balance(), + required: threshold + reservation_cycles, + }); + } + + // Verify that the subnet has enough memory available to satisfy the + // requested change by the canister. + round_limits + .subnet_available_memory + .check_available_memory(memory_increase, NumBytes::from(0), NumBytes::from(0)) + .map_err( + |_| CanisterManagerError::SubnetMemoryCapacityOverSubscribed { + requested: memory_increase, + available: NumBytes::from( + round_limits + .subnet_available_memory + .get_execution_memory() + .max(0) as u64, + ), + }, + )?; + + // Reserve needed cycles if the subnet is becoming saturated. + canister + .system_state + .reserve_cycles(reservation_cycles) + .map_err(|err| match err { + ReservationError::InsufficientCycles { + requested, + available, + } => CanisterManagerError::InsufficientCyclesInMemoryGrow { + bytes: memory_increase, + available, + required: requested, + }, + ReservationError::ReservedLimitExceed { requested, limit } => { + CanisterManagerError::ReservedCyclesLimitExceededInMemoryGrow { + bytes: memory_increase, + requested, + limit, + } + } + })?; + + Ok(()) + } + /// Creates a new canister snapshot. /// /// A canister snapshot can only be initiated by the controllers. 
@@ -1880,98 +1968,21 @@ impl CanisterManager { } let new_snapshot_size = canister.snapshot_size_bytes(); - let new_snapshot_increase = NumBytes::from( - new_snapshot_size - .get() - .saturating_sub(replace_snapshot_size.get()), - ); - let new_memory_usage = NumBytes::from( - canister - .memory_usage() - .get() - .saturating_add(new_snapshot_size.get()) - .saturating_sub(replace_snapshot_size.get()), - ); - { - // Run the following checks on memory usage and return an error - // if any fails: - // 1. Check new usage will not freeze canister - // 2. Check subnet has available memory - // 3. Reserve cycles on canister - // 4. Actually deduct memory from subnet (asserting it won't fail) - - // Calculate if any cycles will need to be reserved. - let reservation_cycles = self.cycles_account_manager.storage_reservation_cycles( - new_snapshot_increase, - resource_saturation, - subnet_size, - ); - - // Memory usage will increase by the snapshot size. - // Check that it doesn't bump the canister over the freezing threshold. - let threshold = self.cycles_account_manager.freeze_threshold_cycles( - canister.system_state.freeze_threshold, - canister.memory_allocation(), - new_memory_usage, - canister.message_memory_usage(), - canister.compute_allocation(), - subnet_size, - canister.system_state.reserved_balance(), - ); - - if canister.system_state.balance() < threshold + reservation_cycles { - return ( - Err(CanisterManagerError::InsufficientCyclesInMemoryGrow { - bytes: new_snapshot_increase, - available: canister.system_state.balance(), - required: threshold + reservation_cycles, - }), - NumInstructions::new(0), - ); - } - // Verify that the subnet has enough memory for a new snapshot. 
- if let Err(err) = round_limits - .subnet_available_memory - .check_available_memory(new_snapshot_increase, NumBytes::from(0), NumBytes::from(0)) - .map_err( - |_| CanisterManagerError::SubnetMemoryCapacityOverSubscribed { - requested: new_snapshot_increase, - available: NumBytes::from( - round_limits - .subnet_available_memory - .get_execution_memory() - .max(0) as u64, - ), - }, - ) - { - return (Err(err), NumInstructions::new(0)); - }; - // Reserve needed cycles if the subnet is becoming saturated. - if let Err(err) = canister - .system_state - .reserve_cycles(reservation_cycles) - .map_err(|err| match err { - ReservationError::InsufficientCycles { - requested, - available, - } => CanisterManagerError::InsufficientCyclesInMemoryGrow { - bytes: new_snapshot_increase, - available, - required: requested, - }, - ReservationError::ReservedLimitExceed { requested, limit } => { - CanisterManagerError::ReservedCyclesLimitExceededInMemoryGrow { - bytes: new_snapshot_increase, - requested, - limit, - } - } - }) - { - return (Err(err), NumInstructions::new(0)); - }; + let old_memory_usage = canister.memory_usage(); + let new_memory_usage = canister + .memory_usage() + .saturating_add(&new_snapshot_size) + .saturating_sub(&replace_snapshot_size); + if let Err(err) = self.memory_usage_checks( + subnet_size, + canister, + round_limits, + new_memory_usage, + old_memory_usage, + resource_saturation, + ) { + return (Err(err), NumInstructions::from(0)); } // Charge for taking a snapshot of the canister. 
@@ -2068,11 +2079,12 @@ impl CanisterManager { &self, subnet_size: usize, sender: PrincipalId, - canister: &CanisterState, + canister: &mut CanisterState, snapshot_id: SnapshotId, state: &mut ReplicatedState, round_limits: &mut RoundLimits, origin: CanisterChangeOrigin, + resource_saturation: &ResourceSaturation, long_execution_already_in_progress: &IntCounter, ) -> (Result, NumInstructions) { let canister_id = canister.canister_id(); @@ -2140,6 +2152,7 @@ impl CanisterManager { } // All basic checks have passed, charge baseline instructions. + let old_memory_usage = canister.memory_usage(); let mut canister_clone = canister.clone(); if let Err(err) = self.cycles_account_manager.consume_cycles_for_instructions( @@ -2224,6 +2237,25 @@ impl CanisterManager { ); } + if let Err(err) = self.memory_usage_checks( + subnet_size, + canister, + round_limits, + new_memory_usage, + old_memory_usage, + resource_saturation, + ) { + return (Err(err), instructions_used); + } + + // Actually deduct memory from the subnet. It's safe to unwrap + // here because we already checked the available memory above. + round_limits.subnet_available_memory.try_decrement( + new_memory_usage.saturating_sub(&old_memory_usage), + NumBytes::from(0), + NumBytes::from(0), + ).expect("Error: Cannot fail to decrement SubnetAvailableMemory after checking for availability"); + // Charge for loading the snapshot of the canister. if let Err(err) = self.cycles_account_manager.consume_cycles_for_instructions( &sender, diff --git a/rs/execution_environment/src/execution_environment.rs b/rs/execution_environment/src/execution_environment.rs index 7771bfa6911..b011d93a0ff 100644 --- a/rs/execution_environment/src/execution_environment.rs +++ b/rs/execution_environment/src/execution_environment.rs @@ -2173,7 +2173,7 @@ impl ExecutionEnvironment { ) -> (Result, UserError>, NumInstructions) { let canister_id = args.get_canister_id(); // Take canister out. 
- let old_canister = match state.take_canister_state(&canister_id) { + let mut old_canister = match state.take_canister_state(&canister_id) { None => { return ( Err(UserError::new( @@ -2187,14 +2187,17 @@ impl ExecutionEnvironment { }; let snapshot_id = args.snapshot_id(); + let resource_saturation = + self.subnet_memory_saturation(&round_limits.subnet_available_memory); let (result, instructions_used) = self.canister_manager.load_canister_snapshot( subnet_size, sender, - &old_canister, + &mut old_canister, snapshot_id, state, round_limits, origin, + &resource_saturation, &self.metrics.long_execution_already_in_progress, ); diff --git a/rs/memory_tracker/BUILD.bazel b/rs/memory_tracker/BUILD.bazel index 8fb051c228d..92774046a1d 100644 --- a/rs/memory_tracker/BUILD.bazel +++ b/rs/memory_tracker/BUILD.bazel @@ -44,3 +44,16 @@ rust_bench( "@crate_index//:criterion", ] + DEPENDENCIES, ) + +rust_bench( + name = "memory_ops_bench", + testonly = True, + srcs = ["benches/memory_ops.rs"], + data = ["benches/test-data/64KiB.txt"], + deps = [ + # Keep sorted. 
+ "@crate_index//:criterion", + "@crate_index//:memmap2", + "@crate_index//:rayon", + ] + DEPENDENCIES, +) diff --git a/rs/memory_tracker/Cargo.toml b/rs/memory_tracker/Cargo.toml index b3db2b8aadc..628fde2ba9a 100644 --- a/rs/memory_tracker/Cargo.toml +++ b/rs/memory_tracker/Cargo.toml @@ -1,10 +1,10 @@ [package] -name = "memory_tracker" -version.workspace = true authors.workspace = true -edition.workspace = true description.workspace = true documentation.workspace = true +edition.workspace = true +name = "memory_tracker" +version.workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html @@ -25,9 +25,15 @@ sigsegv_handler_checksum = [] [dev-dependencies] criterion = { workspace = true } ic-types = { path = "../types/types" } +memmap2 = { workspace = true } proptest = { workspace = true } +rayon = { workspace = true } tempfile = { workspace = true } [[bench]] +harness = false name = "traps" + +[[bench]] harness = false +name = "memory_ops" diff --git a/rs/memory_tracker/benches/memory_ops.rs b/rs/memory_tracker/benches/memory_ops.rs new file mode 100644 index 00000000000..b007bfc2f24 --- /dev/null +++ b/rs/memory_tracker/benches/memory_ops.rs @@ -0,0 +1,156 @@ +use std::{fs::File, os::unix::fs::FileExt}; + +use criterion::{criterion_group, criterion_main, Criterion, Throughput}; +use memmap2::MmapOptions; +use rayon::prelude::*; + +const DATA: &str = "rs/memory_tracker/benches/test-data/64KiB.txt"; +const OPS: [usize; 1] = [100]; +const THREADS: [usize; 4] = [1, 2, 4, 8]; +const PAGE_SIZE: usize = 4096; +const WASM_PAGE_SIZE: usize = 64 * 1024; + +fn mmap(file: &File) { + // Calls `mmap` with `PROT_READ` and `MAP_PRIVATE`. + let _mm = unsafe { MmapOptions::new().map_copy_read_only(file).unwrap() }; +} + +fn mmap_mprotect(file: &File) { + // Calls `mmap` with `PROT_READ` and `MAP_PRIVATE`. 
+ let mm = unsafe { MmapOptions::new().map_copy_read_only(file).unwrap() }; + // Calls `mprotect` with `PROT_READ | PROT_WRITE`. + let _mm = mm.make_mut().unwrap(); +} + +fn mmap_mprotect_read(file: &File) { + // Calls `mmap` with `PROT_READ` and `MAP_PRIVATE`. + let mm = unsafe { MmapOptions::new().map_copy_read_only(file).unwrap() }; + // Calls `mprotect` with `PROT_READ | PROT_WRITE`. + let mm = mm.make_mut().unwrap(); + // Reads 64 KiB. + for i in 0..64 / 4 { + let _b = std::hint::black_box(mm[PAGE_SIZE * i]); + } +} + +fn mmap_mprotect_write(file: &File) { + // Calls `mmap` with `PROT_READ` and `MAP_PRIVATE`. + let mm = unsafe { MmapOptions::new().map_copy_read_only(file).unwrap() }; + // Calls `mprotect` with `PROT_READ | PROT_WRITE`. + let mut mm = mm.make_mut().unwrap(); + // Makes 64 KiB copies on write. + for i in 0..64 / 4 { + mm[PAGE_SIZE * i] = 42; + } +} + +fn mmap_mprotect_read_write(file: &File) { + // Calls `mmap` with `PROT_READ` and `MAP_PRIVATE`. + let mm = unsafe { MmapOptions::new().map_copy_read_only(file).unwrap() }; + // Calls `mprotect` with `PROT_READ | PROT_WRITE`. + let mut mm = mm.make_mut().unwrap(); + // Reads then makes 64 KiB copies on write. + for i in 0..64 / 4 { + mm[PAGE_SIZE * i] = mm[1 + PAGE_SIZE * i]; + } +} + +fn mmap_read_write(file: &File) { + // Calls `mmap` with `PROT_READ | PROT_WRITE` and `MAP_PRIVATE`. + let mut mm = unsafe { MmapOptions::new().map_copy(file).unwrap() }; + // Reads then makes 64 KiB copies on write. 
+ for i in 0..64 / 4 { + mm[PAGE_SIZE * i] = mm[1 + PAGE_SIZE * i]; + } +} + +fn file_read_write(file: &File) { + let mut buf = [0u8; WASM_PAGE_SIZE]; + file.read_exact_at(&mut buf, 0).unwrap(); + for i in 0..64 / 4 { + buf[PAGE_SIZE * i] = buf[1 + PAGE_SIZE * i]; + } +} + +fn bench(c: &mut Criterion, group_name: &str, routine: fn(&File)) { + let mut group = c.benchmark_group(group_name); + + let file = File::open(DATA).unwrap(); + + for ops in OPS { + let input = vec![&file; ops]; + + group.throughput(Throughput::Elements(ops as u64)); + + group.bench_with_input(format!("iter/ops:{ops}"), &input, |b, i| { + b.iter(|| { + i.iter().for_each(|f| { + routine(f); + }) + }) + }); + + for threads in THREADS { + let pool = rayon::ThreadPoolBuilder::new() + .num_threads(threads) + .build() + .unwrap(); + + group.bench_with_input( + format!("par_iter/threads:{threads}/ops:{ops}"), + &input, + |b, i| { + b.iter(|| { + pool.install(|| { + i.par_iter().for_each(|f| { + routine(f); + }) + }) + }) + }, + ); + } + } + + group.finish(); +} + +fn mmap_bench(c: &mut Criterion) { + bench(c, "mmap", mmap); +} + +fn mmap_mprotect_bench(c: &mut Criterion) { + bench(c, "mmap_mprotect", mmap_mprotect); +} + +fn mmap_mprotect_read_bench(c: &mut Criterion) { + bench(c, "mmap_mprotect_read", mmap_mprotect_read); +} + +fn mmap_mprotect_write_bench(c: &mut Criterion) { + bench(c, "mmap_mprotect_write", mmap_mprotect_write); +} + +fn mmap_mprotect_read_write_bench(c: &mut Criterion) { + bench(c, "mmap_mprotect_read_write", mmap_mprotect_read_write); +} + +fn mmap_read_write_bench(c: &mut Criterion) { + bench(c, "mmap_read_write", mmap_read_write); +} + +fn file_read_write_bench(c: &mut Criterion) { + bench(c, "file_read_write", file_read_write); +} + +criterion_group!( + benches, + mmap_bench, + mmap_mprotect_bench, + mmap_mprotect_read_bench, + mmap_mprotect_write_bench, + mmap_mprotect_read_write_bench, + mmap_read_write_bench, + file_read_write_bench, +); +criterion_main!(benches); diff 
--git a/rs/memory_tracker/benches/test-data/64KiB.txt b/rs/memory_tracker/benches/test-data/64KiB.txt new file mode 100644 index 00000000000..6ce859af5c3 --- /dev/null +++ b/rs/memory_tracker/benches/test-data/64KiB.txt @@ -0,0 +1,64 @@ +*10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + 
20000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + 
30000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + 
40000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+*50000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + 
60000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + 
70000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 + 
80000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+*90000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+*30000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+140000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+150000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+160000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+*70000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+180000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+190000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+*10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+220000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+230000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+240000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+*50000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+260000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+270000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+280000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+*90000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+310000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+*30000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+340000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+350000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+360000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+*70000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+380000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+390000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+*10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+420000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+430000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+440000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+*50000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+460000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+470000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+480000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+*90000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+500000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+510000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+520000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+*30000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+540000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+550000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+560000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+*70000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+580000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+590000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+*10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+620000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+630000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 
+640000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000 diff --git a/rs/nervous_system/agent/src/management_canister/mod.rs b/rs/nervous_system/agent/src/management_canister/mod.rs index 968e59c355f..036b2e46d74 100644 --- a/rs/nervous_system/agent/src/management_canister/mod.rs +++ b/rs/nervous_system/agent/src/management_canister/mod.rs @@ -10,6 +10,50 @@ use requests::*; pub const CHUNK_SIZE: usize = 1024 * 1024; // 1 MiB +pub async fn canister_status( + agent: &C, + canister_id: CanisterId, +) -> Result { + let response = agent + .call( + Principal::management_canister(), + CanisterStatusArgs { + canister_id: canister_id.get().0, + }, + ) + .await?; + + Ok(response) +} + +pub async fn stop_canister( + agent: &C, + canister_id: CanisterId, +) -> Result<(), C::Error> { + agent + .call( + Principal::management_canister(), + StopCanisterArgs { + canister_id: canister_id.get().0, + }, + ) + .await +} + +pub async fn delete_canister( + agent: &C, + canister_id: CanisterId, +) -> Result<(), C::Error> { + agent + 
.call( + Principal::management_canister(), + DeleteCanisterArgs { + canister_id: canister_id.get().0, + }, + ) + .await +} + async fn upload_chunk( agent: &C, store_canister_id: CanisterId, diff --git a/rs/nervous_system/agent/src/management_canister/requests.rs b/rs/nervous_system/agent/src/management_canister/requests.rs index c60815237ef..9c1ddffcb66 100644 --- a/rs/nervous_system/agent/src/management_canister/requests.rs +++ b/rs/nervous_system/agent/src/management_canister/requests.rs @@ -78,3 +78,156 @@ impl Request for StoredChunksArgs { type Response = StoredChunksResult; } + +// ``` +// type canister_status_args = record { +// canister_id : canister_id; +// }; +// ``` + +// ``` +// type canister_status_result = record { +// status : variant { running; stopping; stopped }; +// settings : definite_canister_settings; +// module_hash : opt blob; +// memory_size : nat; +// cycles : nat; +// reserved_cycles : nat; +// idle_cycles_burned_per_day : nat; +// query_stats: record { +// num_calls_total: nat; +// num_instructions_total: nat; +// request_payload_bytes_total: nat; +// response_payload_bytes_total: nat; +// }; +// }; +// ``` + +#[derive(CandidType, Deserialize, Debug, Clone)] +pub enum LogVisibility { + #[serde(rename = "controllers")] + Controllers, + #[serde(rename = "public")] + Public, + #[serde(rename = "allowed_viewers")] + AllowedViewers(Vec), +} + +#[derive(CandidType, Deserialize, Debug, Clone)] +pub enum CanisterStatusResultStatus { + #[serde(rename = "stopped")] + Stopped, + #[serde(rename = "stopping")] + Stopping, + #[serde(rename = "running")] + Running, +} + +#[derive(CandidType, Deserialize, Debug, Clone)] +pub struct DefiniteCanisterSettings { + pub freezing_threshold: candid::Nat, + pub controllers: Vec, + pub reserved_cycles_limit: candid::Nat, + pub log_visibility: LogVisibility, + pub wasm_memory_limit: candid::Nat, + pub memory_allocation: candid::Nat, + pub compute_allocation: candid::Nat, +} + +#[derive(CandidType, Deserialize, 
Debug, Clone)] +pub struct CanisterStatusResultQueryStats { + pub response_payload_bytes_total: candid::Nat, + pub num_instructions_total: candid::Nat, + pub num_calls_total: candid::Nat, + pub request_payload_bytes_total: candid::Nat, +} + +#[derive(CandidType, Deserialize, Debug, Clone)] +pub struct CanisterStatusResult { + pub status: CanisterStatusResultStatus, + pub settings: DefiniteCanisterSettings, + pub module_hash: Option>, + pub memory_size: candid::Nat, + pub cycles: candid::Nat, + pub reserved_cycles: candid::Nat, + pub idle_cycles_burned_per_day: candid::Nat, + pub query_stats: CanisterStatusResultQueryStats, +} + +// ``` +// type canister_status_args = record { +// canister_id : canister_id; +// }; +// ``` +#[derive(CandidType, Deserialize, Debug, Clone)] +pub struct CanisterStatusArgs { + pub canister_id: Principal, +} + +impl Request for CanisterStatusArgs { + fn method(&self) -> &'static str { + "canister_status" + } + + fn update(&self) -> bool { + true + } + + fn payload(&self) -> Result, candid::Error> { + candid::encode_one(self) + } + + type Response = CanisterStatusResult; +} + +// ``` +// type stop_canister_args = record { +// canister_id : canister_id; +// }; +// ``` +#[derive(CandidType, Deserialize, Debug, Clone)] +pub struct StopCanisterArgs { + pub canister_id: Principal, +} + +impl Request for StopCanisterArgs { + fn method(&self) -> &'static str { + "stop_canister" + } + + fn update(&self) -> bool { + true + } + + fn payload(&self) -> Result, candid::Error> { + candid::encode_one(self) + } + + type Response = (); +} + +// ``` +// type delete_canister_args = record { +// canister_id : canister_id; +// }; +// ``` +#[derive(CandidType, Deserialize, Debug, Clone)] +pub struct DeleteCanisterArgs { + pub canister_id: Principal, +} + +impl Request for DeleteCanisterArgs { + fn method(&self) -> &'static str { + "delete_canister" + } + + fn update(&self) -> bool { + true + } + + fn payload(&self) -> Result, candid::Error> { + 
candid::encode_one(self) + } + + type Response = (); +} diff --git a/rs/nervous_system/agent/src/pocketic_impl.rs b/rs/nervous_system/agent/src/pocketic_impl.rs index d349c66c242..9190c8e3b90 100644 --- a/rs/nervous_system/agent/src/pocketic_impl.rs +++ b/rs/nervous_system/agent/src/pocketic_impl.rs @@ -1,4 +1,6 @@ -use crate::management_canister::requests::{StoredChunksArgs, UploadChunkArgs}; +use crate::management_canister::requests::{ + CanisterStatusArgs, DeleteCanisterArgs, StopCanisterArgs, StoredChunksArgs, UploadChunkArgs, +}; use crate::Request; use crate::{CallCanisters, CanisterInfo}; use candid::Principal; @@ -69,6 +71,21 @@ impl PocketIcAgent<'_> { .map_err(PocketIcCallError::CandidDecode)? .canister_id } + "canister_status" => { + candid::decode_one::(payload.as_slice()) + .map_err(PocketIcCallError::CandidDecode)? + .canister_id + } + "stop_canister" => { + candid::decode_one::(payload.as_slice()) + .map_err(PocketIcCallError::CandidDecode)? + .canister_id + } + "delete_canister" => { + candid::decode_one::(payload.as_slice()) + .map_err(PocketIcCallError::CandidDecode)? 
+ .canister_id + } mathod_name => { unimplemented!( "PocketIcAgent does not currently implement IC00.{}", diff --git a/rs/nervous_system/agent/src/sns/governance.rs b/rs/nervous_system/agent/src/sns/governance.rs index 37e9165a24d..306c5134449 100644 --- a/rs/nervous_system/agent/src/sns/governance.rs +++ b/rs/nervous_system/agent/src/sns/governance.rs @@ -2,8 +2,9 @@ use crate::{null_request::NullRequest, CallCanisters}; use ic_base_types::PrincipalId; use ic_sns_governance_api::pb::v1::{ manage_neuron, manage_neuron_response, GetMetadataRequest, GetMetadataResponse, GetMode, - GetModeResponse, GetRunningSnsVersionRequest, GetRunningSnsVersionResponse, GovernanceError, - ManageNeuron, ManageNeuronResponse, NervousSystemParameters, NeuronId, Proposal, ProposalId, + GetModeResponse, GetProposal, GetProposalResponse, GetRunningSnsVersionRequest, + GetRunningSnsVersionResponse, GovernanceError, ManageNeuron, ManageNeuronResponse, + NervousSystemParameters, NeuronId, Proposal, ProposalId, }; use serde::{Deserialize, Serialize}; use thiserror::Error; @@ -108,6 +109,17 @@ impl GovernanceCanister { Ok(response) } + + pub async fn get_proposal( + &self, + agent: &C, + proposal_id: ProposalId, + ) -> Result { + let request = GetProposal { + proposal_id: Some(proposal_id), + }; + agent.call(self.canister_id, request).await + } } impl GovernanceCanister { diff --git a/rs/nervous_system/integration_tests/src/pocket_ic_helpers.rs b/rs/nervous_system/integration_tests/src/pocket_ic_helpers.rs index 656e2b904d6..0ac6fe09568 100644 --- a/rs/nervous_system/integration_tests/src/pocket_ic_helpers.rs +++ b/rs/nervous_system/integration_tests/src/pocket_ic_helpers.rs @@ -1107,7 +1107,8 @@ pub mod nns { include_empty_neurons_readable_by_caller: Some(true), include_public_neurons_in_full_neurons: None, page_number: None, - page_size: None + page_size: None, + neuron_subaccounts: None, }) .unwrap(), ) diff --git 
a/rs/nervous_system/integration_tests/tests/upgrade_sns_controlled_canister_with_large_wasm.rs b/rs/nervous_system/integration_tests/tests/upgrade_sns_controlled_canister_with_large_wasm.rs index fc563562792..6ee7503a996 100644 --- a/rs/nervous_system/integration_tests/tests/upgrade_sns_controlled_canister_with_large_wasm.rs +++ b/rs/nervous_system/integration_tests/tests/upgrade_sns_controlled_canister_with_large_wasm.rs @@ -1,5 +1,11 @@ +use assert_matches::assert_matches; use canister_test::Wasm; -use ic_nervous_system_agent::pocketic_impl::PocketIcAgent; +use ic_base_types::CanisterId; +use ic_management_canister_types::CanisterInstallMode; +use ic_nervous_system_agent::management_canister::canister_status; +use ic_nervous_system_agent::pocketic_impl::{ + PocketIcAgent, PocketIcCallError::CanisterSubnetNotFound, +}; use ic_nervous_system_integration_tests::pocket_ic_helpers::sns::governance::{ find_neuron_with_majority_voting_power, wait_for_proposal_execution, }; @@ -15,8 +21,10 @@ use ic_nns_constants::ROOT_CANISTER_ID; use ic_nns_test_utils::common::modify_wasm_bytes; use ic_sns_cli::neuron_id_to_candid_subaccount::ParsedSnsNeuron; use ic_sns_cli::upgrade_sns_controlled_canister::{ - self, UpgradeSnsControlledCanisterArgs, UpgradeSnsControlledCanisterInfo, + self, RefundAfterSnsControlledCanisterUpgradeArgs, UpgradeSnsControlledCanisterArgs, + UpgradeSnsControlledCanisterInfo, }; +use ic_sns_governance_api::pb::v1::{proposal, ChunkedCanisterWasm, UpgradeSnsControlledCanister}; use ic_sns_swap::pb::v1::Lifecycle; use icp_ledger::Tokens; use pocket_ic::PocketIcBuilder; @@ -171,11 +179,39 @@ async fn upgrade_sns_controlled_canister_with_large_wasm() { let proposal_id = proposal_id.unwrap(); // 3. Await proposal execution. 
- wait_for_proposal_execution(&pocket_ic, sns.governance.canister_id, proposal_id) + let action = wait_for_proposal_execution(&pocket_ic, sns.governance.canister_id, proposal_id) .await + .unwrap() + .proposal + .unwrap() + .action .unwrap(); - // 4. Inspect the resulting state. + // 4. Inspect proposal data (and obtain store_canister_id for future inspection). + let proposal::Action::UpgradeSnsControlledCanister(UpgradeSnsControlledCanister { + canister_id, + new_canister_wasm, + canister_upgrade_arg, + mode, + chunked_canister_wasm, + }) = action + else { + panic!("unexpected proposal action {:?}", action); + }; + assert_eq!(canister_id, Some(target_canister_id.into())); + assert_eq!(new_canister_wasm, Vec::::new()); // Deprecated field, no longer in use. + assert_eq!(canister_upgrade_arg, None); + assert_eq!(mode, Some(CanisterInstallMode::Upgrade as i32)); + let store_canister_id = assert_matches!(chunked_canister_wasm, Some(ChunkedCanisterWasm { + wasm_module_hash: observed_wasm_module_hash, + store_canister_id: Some(store_canister_id), + .. + }) => { + assert_eq!(observed_wasm_module_hash, wasm_module_hash); + store_canister_id + }); + + // 5. Inspect the resulting state. await_with_timeout( &pocket_ic, MIN_INSTALL_CHUNKED_CODE_TIME_SECONDS..MAX_INSTALL_CHUNKED_CODE_TIME_SECONDS, @@ -191,4 +227,22 @@ async fn upgrade_sns_controlled_canister_with_large_wasm() { ) .await .unwrap(); + + // 6. Clean-up. + let refund_arg = RefundAfterSnsControlledCanisterUpgradeArgs { + target_canister_id, + proposal_id: proposal_id.id, + }; + upgrade_sns_controlled_canister::refund(refund_arg, &pocket_ic_agent) + .await + .unwrap(); + + // 7. Assert that store canister has zero cycles left on its balance. + let err = canister_status( + &pocket_ic_agent, + CanisterId::unchecked_from_principal(store_canister_id), + ) + .await + .unwrap_err(); + assert_matches!(err, CanisterSubnetNotFound { .. 
}); } diff --git a/rs/nns/governance/BUILD.bazel b/rs/nns/governance/BUILD.bazel index 1b3bbdc4596..d73d0ec294b 100644 --- a/rs/nns/governance/BUILD.bazel +++ b/rs/nns/governance/BUILD.bazel @@ -219,6 +219,7 @@ rust_canbench( name = "governance-canbench", srcs = ["canbench/main.rs"], add_test = True, + noise_threshold = 5.0, # For some reason, the NNS Governance benchmarks are sensitive to the optimization level. # We would like to figure out why and fix it, but for now, we are reducing the optimization # level so that tests against the optimization level can be added. diff --git a/rs/nns/governance/CHANGELOG.md b/rs/nns/governance/CHANGELOG.md index 6c55280fa66..3494d3275f3 100644 --- a/rs/nns/governance/CHANGELOG.md +++ b/rs/nns/governance/CHANGELOG.md @@ -8,12 +8,61 @@ The process that populates this file is described in here were moved from the adjacent `unreleased_changelog.md` file. -INSERT NEW RELEASES HERE +# 2025-02-03: Proposal 135063 +http://dashboard.internetcomputer.org/proposal/135063 + +## Added + +### Migrating Active Neurons to Stable Memory + +In this release, we turn on 2 features related to migrating active neurons to stable memory: + +1. `allow_active_neurons_in_stable_memory`: this allows the canister to look for active neurons in + stable memory, while previously the canister always assumes active neurons are always in the heap. + +2. `use_stable_memory_following_index`: this lets the canister use the neuron following index in the + stable memory, instead of the one in the heap. + +No neurons are actually migrated yet. + +## Changed + +* The `list_neurons` behavior is slightly changed: the `include_empty_neurons_readable_by_caller` + was default to true before, and now it's default to false.
More details can be found at: + https://forum.dfinity.org/t/listneurons-api-change-empty-neurons/40311 + + +# 2025-01-27: Proposal 134988 + +https://dashboard.internetcomputer.org/proposal/134988 + +## Added + +### List Neurons Paging + +Two new fields are added to the request, and one to the response. + +The request now supports `page_size` and `page_number`. If `page_size` is greater than +`MAX_LIST_NEURONS_RESULTS` (currently 500), the API will treat it as `MAX_LIST_NEURONS_RESULTS`, and +continue processing the request. If `page_number` is None, the API will treat it as Some(0). + +In the response, a field `total_pages_available` is available to tell the user how many +additional requests need to be made. + +This will only affect neuron holders with more than 500 neurons, which is a small minority. + +This allows neuron holders with many neurons to list all of their neurons, whereas before, +responses could be too large to be sent by the protocol. + +## Changed + +* `InstallCode` proposal payload hashes are now computed when making the proposal instead of when + listing proposal. Hashes for existing proposals are backfilled. # 2025-01-13: Proposal 134777 -http://dashboard.internetcomputer.org/proposals/134777 +http://dashboard.internetcomputer.org/proposal/134777 ### Periodic Confirmation diff --git a/rs/nns/governance/api/src/ic_nns_governance.pb.v1.rs b/rs/nns/governance/api/src/ic_nns_governance.pb.v1.rs index b8ab14d17e4..343d192f290 100644 --- a/rs/nns/governance/api/src/ic_nns_governance.pb.v1.rs +++ b/rs/nns/governance/api/src/ic_nns_governance.pb.v1.rs @@ -3463,6 +3463,52 @@ pub struct ListProposalInfo { pub struct ListProposalInfoResponse { pub proposal_info: Vec, } + +/// The same as ListNeurons, but only used in list_neurons_pb, which is deprecated. +/// This is temporarily split out so that the API changes to list_neurons do not have to +/// follow both candid and protobuf standards for changes, which simplifies the API design +/// considerably. 
+/// +/// This type should be removed when list_neurons_pb is finally deprecated. +#[derive(candid::CandidType, candid::Deserialize, serde::Serialize, comparable::Comparable)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListNeuronsProto { + /// The neurons to get information about. The "requested list" + /// contains all of these neuron IDs. + #[prost(fixed64, repeated, packed = "false", tag = "1")] + pub neuron_ids: Vec, + /// If true, the "requested list" also contains the neuron ID of the + /// neurons that the calling principal is authorized to read. + #[prost(bool, tag = "2")] + pub include_neurons_readable_by_caller: bool, + /// Whether to also include empty neurons readable by the caller. This field only has an effect + /// when `include_neurons_readable_by_caller` is true. If a neuron's id already exists in the + /// `neuron_ids` field, then the neuron will be included in the response regardless of the value + /// of this field. The default value is false (i.e. `None` is treated as `Some(false)`). Here, + /// being "empty" means 0 stake, 0 maturity and 0 staked maturity. + #[prost(bool, optional, tag = "3")] + pub include_empty_neurons_readable_by_caller: Option, + /// If this is set to true, and a neuron in the "requested list" has its + /// visibility set to public, then, it will (also) be included in the + /// full_neurons field in the response (which is of type ListNeuronsResponse). + /// Note that this has no effect on which neurons are in the "requested list". + /// In particular, this does not cause all public neurons to become part of the + /// requested list. In general, you probably want to set this to true, but + /// since this feature was added later, it is opt in to avoid confusing + /// existing (unmigrated) callers. 
+ #[prost(bool, optional, tag = "4")] + pub include_public_neurons_in_full_neurons: Option, + /// If this is set, we return the batch of neurons at a given page, using the `page_size` to + /// determine how many neurons are returned in each page. + #[prost(uint64, optional, tag = "5")] + pub page_number: Option, + /// If this is set, we use the page limit provided to determine how large pages will be. + /// This cannot be greater than MAX_LIST_NEURONS_RESULTS, which is set to 500. + /// If not set, this defaults to MAX_LIST_NEURONS_RESULTS. + #[prost(uint64, optional, tag = "6")] + pub page_size: Option, +} /// A request to list neurons. The "requested list", i.e., the list of /// neuron IDs to retrieve information about, is the union of the list /// of neurons listed in `neuron_ids` and, if `caller_neurons` is true, @@ -3484,22 +3530,19 @@ pub struct ListProposalInfoResponse { /// will be returned in the current page. #[derive(candid::CandidType, candid::Deserialize, serde::Serialize, comparable::Comparable)] #[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] +#[derive(Clone, Debug, Default, PartialEq)] pub struct ListNeurons { /// The neurons to get information about. The "requested list" /// contains all of these neuron IDs. - #[prost(fixed64, repeated, packed = "false", tag = "1")] pub neuron_ids: Vec, /// If true, the "requested list" also contains the neuron ID of the /// neurons that the calling principal is authorized to read. - #[prost(bool, tag = "2")] pub include_neurons_readable_by_caller: bool, /// Whether to also include empty neurons readable by the caller. This field only has an effect /// when `include_neurons_readable_by_caller` is true. If a neuron's id already exists in the /// `neuron_ids` field, then the neuron will be included in the response regardless of the value /// of this field. The default value is false (i.e. `None` is treated as `Some(false)`). 
Here, /// being "empty" means 0 stake, 0 maturity and 0 staked maturity. - #[prost(bool, optional, tag = "3")] pub include_empty_neurons_readable_by_caller: Option, /// If this is set to true, and a neuron in the "requested list" has its /// visibility set to public, then, it will (also) be included in the @@ -3509,18 +3552,30 @@ pub struct ListNeurons { /// requested list. In general, you probably want to set this to true, but /// since this feature was added later, it is opt in to avoid confusing /// existing (unmigrated) callers. - #[prost(bool, optional, tag = "4")] pub include_public_neurons_in_full_neurons: Option, /// If this is set, we return the batch of neurons at a given page, using the `page_size` to /// determine how many neurons are returned in each page. - #[prost(uint64, optional, tag = "5")] pub page_number: Option, /// If this is set, we use the page limit provided to determine how large pages will be. /// This cannot be greater than MAX_LIST_NEURONS_RESULTS, which is set to 500. /// If not set, this defaults to MAX_LIST_NEURONS_RESULTS. - #[prost(uint64, optional, tag = "6")] pub page_size: Option, + /// A list of neurons by subaccounts to return in the response. If the neurons are not + /// found by subaccount, no error is returned, but the page will still be returned. + pub neuron_subaccounts: Option>, } + +pub mod list_neurons { + /// A type for the request to list neurons. + #[derive(candid::CandidType, candid::Deserialize, serde::Serialize, comparable::Comparable)] + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, Debug, PartialEq)] + pub struct NeuronSubaccount { + #[serde(with = "serde_bytes")] + pub subaccount: Vec, + } +} + /// A response to a `ListNeurons` request. /// /// The "requested list" is described in `ListNeurons`. 
diff --git a/rs/nns/governance/api/src/pb.rs b/rs/nns/governance/api/src/pb.rs index bd02a22c64e..ab16fc9e814 100644 --- a/rs/nns/governance/api/src/pb.rs +++ b/rs/nns/governance/api/src/pb.rs @@ -1,8 +1,8 @@ use crate::pb::v1::{ governance::migration::MigrationStatus, governance_error::ErrorType, neuron::DissolveState, - CreateServiceNervousSystem, GovernanceError, NetworkEconomics, Neuron, NeuronState, - NeuronsFundEconomics, NeuronsFundMatchedFundingCurveCoefficients, VotingPowerEconomics, - XdrConversionRate, + CreateServiceNervousSystem, GovernanceError, ListNeurons, ListNeuronsProto, NetworkEconomics, + Neuron, NeuronState, NeuronsFundEconomics, NeuronsFundMatchedFundingCurveCoefficients, + VotingPowerEconomics, XdrConversionRate, }; use ic_nervous_system_common::{ONE_DAY_SECONDS, ONE_MONTH_SECONDS}; use ic_nervous_system_proto::pb::v1::{Decimal, Duration, GlobalTimeOfDay, Percentage}; @@ -264,3 +264,20 @@ impl CreateServiceNervousSystem { Ok((swap_start_timestamp_seconds, swap_due_timestamp_seconds)) } } + +impl From for ListNeurons { + fn from(list_neurons_proto: ListNeuronsProto) -> Self { + Self { + neuron_ids: list_neurons_proto.neuron_ids, + include_neurons_readable_by_caller: list_neurons_proto + .include_neurons_readable_by_caller, + include_empty_neurons_readable_by_caller: list_neurons_proto + .include_empty_neurons_readable_by_caller, + include_public_neurons_in_full_neurons: list_neurons_proto + .include_public_neurons_in_full_neurons, + page_number: list_neurons_proto.page_number, + page_size: list_neurons_proto.page_size, + neuron_subaccounts: None, + } + } +} diff --git a/rs/nns/governance/canbench/canbench_results.yml b/rs/nns/governance/canbench/canbench_results.yml index db028e938c3..7f5919fa4d2 100644 --- a/rs/nns/governance/canbench/canbench_results.yml +++ b/rs/nns/governance/canbench/canbench_results.yml @@ -1,169 +1,181 @@ benches: add_neuron_active_maximum: total: - instructions: 42752796 + instructions: 42752810 heap_increase: 1 
stable_memory_increase: 0 scopes: {} add_neuron_active_typical: total: - instructions: 2170658 + instructions: 2170672 heap_increase: 0 stable_memory_increase: 0 scopes: {} add_neuron_inactive_maximum: total: - instructions: 112624375 + instructions: 112624946 heap_increase: 1 stable_memory_increase: 0 scopes: {} add_neuron_inactive_typical: total: - instructions: 8497036 + instructions: 8497607 heap_increase: 0 stable_memory_increase: 0 scopes: {} cascading_vote_all_heap: total: - instructions: 35676146 + instructions: 35610228 heap_increase: 0 stable_memory_increase: 128 scopes: {} cascading_vote_heap_neurons_stable_index: total: - instructions: 61811185 + instructions: 61744033 heap_increase: 0 stable_memory_increase: 128 scopes: {} cascading_vote_stable_everything: total: - instructions: 188611915 + instructions: 189084050 heap_increase: 0 stable_memory_increase: 128 scopes: {} cascading_vote_stable_neurons_with_heap_index: total: - instructions: 162343480 + instructions: 162816849 heap_increase: 0 stable_memory_increase: 128 scopes: {} centralized_following_all_stable: total: - instructions: 78265237 + instructions: 78519736 heap_increase: 0 stable_memory_increase: 128 scopes: {} compute_ballots_for_new_proposal_with_stable_neurons: total: - instructions: 2230000 + instructions: 2265911 heap_increase: 0 stable_memory_increase: 0 scopes: {} draw_maturity_from_neurons_fund_heap: total: - instructions: 7656798 + instructions: 7607998 heap_increase: 0 stable_memory_increase: 0 scopes: {} draw_maturity_from_neurons_fund_stable: total: - instructions: 12339498 + instructions: 12444384 heap_increase: 0 stable_memory_increase: 0 scopes: {} list_active_neurons_fund_neurons_heap: total: - instructions: 435463 + instructions: 435492 heap_increase: 0 stable_memory_increase: 0 scopes: {} list_active_neurons_fund_neurons_stable: total: - instructions: 2820000 + instructions: 2819667 heap_increase: 0 stable_memory_increase: 0 scopes: {} + list_neurons_by_subaccount_heap: + 
total: + instructions: 7551696 + heap_increase: 9 + stable_memory_increase: 0 + scopes: {} + list_neurons_by_subaccount_stable: + total: + instructions: 111661652 + heap_increase: 5 + stable_memory_increase: 0 + scopes: {} list_neurons_heap: total: - instructions: 4950000 + instructions: 4963821 heap_increase: 9 stable_memory_increase: 0 scopes: {} list_neurons_ready_to_unstake_maturity_heap: total: - instructions: 158253 + instructions: 158195 heap_increase: 0 stable_memory_increase: 0 scopes: {} list_neurons_ready_to_unstake_maturity_stable: total: - instructions: 43300000 + instructions: 43332558 heap_increase: 0 stable_memory_increase: 0 scopes: {} list_neurons_stable: total: - instructions: 113606723 + instructions: 113665560 heap_increase: 5 stable_memory_increase: 0 scopes: {} list_proposals: total: - instructions: 126040 + instructions: 126041 heap_increase: 0 stable_memory_increase: 0 scopes: {} list_ready_to_spawn_neuron_ids_heap: total: - instructions: 132847 + instructions: 132789 heap_increase: 0 stable_memory_increase: 0 scopes: {} list_ready_to_spawn_neuron_ids_stable: total: - instructions: 43270000 + instructions: 43304554 heap_increase: 0 stable_memory_increase: 0 scopes: {} neuron_data_validation_heap: total: - instructions: 406853184 + instructions: 407435747 heap_increase: 0 stable_memory_increase: 0 scopes: {} neuron_data_validation_stable: total: - instructions: 362648372 + instructions: 363402784 heap_increase: 0 stable_memory_increase: 0 scopes: {} neuron_metrics_calculation_heap: total: - instructions: 1498869 + instructions: 1485668 heap_increase: 0 stable_memory_increase: 0 scopes: {} neuron_metrics_calculation_stable: total: - instructions: 3027495 + instructions: 3085347 heap_increase: 0 stable_memory_increase: 0 scopes: {} range_neurons_performance: total: - instructions: 56447340 + instructions: 56514456 heap_increase: 0 stable_memory_increase: 0 scopes: {} single_vote_all_stable: total: - instructions: 2805871 + instructions: 
2806911 heap_increase: 0 stable_memory_increase: 128 scopes: {} update_recent_ballots_stable_memory: total: - instructions: 274000 + instructions: 275035 heap_increase: 0 stable_memory_increase: 0 scopes: {} diff --git a/rs/nns/governance/canister/canister.rs b/rs/nns/governance/canister/canister.rs index 0df9c573032..276f7286f1d 100644 --- a/rs/nns/governance/canister/canister.rs +++ b/rs/nns/governance/canister/canister.rs @@ -42,11 +42,11 @@ use ic_nns_governance_api::{ manage_neuron_response, ClaimOrRefreshNeuronFromAccount, ClaimOrRefreshNeuronFromAccountResponse, GetNeuronsFundAuditInfoRequest, GetNeuronsFundAuditInfoResponse, Governance as ApiGovernanceProto, GovernanceError, - ListKnownNeuronsResponse, ListNeurons, ListNeuronsResponse, ListNodeProviderRewardsRequest, - ListNodeProviderRewardsResponse, ListNodeProvidersResponse, ListProposalInfo, - ListProposalInfoResponse, ManageNeuronCommandRequest, ManageNeuronRequest, - ManageNeuronResponse, MonthlyNodeProviderRewards, NetworkEconomics, Neuron, NeuronInfo, - NodeProvider, Proposal, ProposalInfo, RestoreAgingSummary, RewardEvent, + ListKnownNeuronsResponse, ListNeurons, ListNeuronsProto, ListNeuronsResponse, + ListNodeProviderRewardsRequest, ListNodeProviderRewardsResponse, ListNodeProvidersResponse, + ListProposalInfo, ListProposalInfoResponse, ManageNeuronCommandRequest, + ManageNeuronRequest, ManageNeuronResponse, MonthlyNodeProviderRewards, NetworkEconomics, + Neuron, NeuronInfo, NodeProvider, Proposal, ProposalInfo, RestoreAgingSummary, RewardEvent, SettleCommunityFundParticipation, SettleNeuronsFundParticipationRequest, SettleNeuronsFundParticipationResponse, UpdateNodeProvider, Vote, }, @@ -907,8 +907,10 @@ fn list_neurons_pb() { ); ic_cdk::setup(); - let request = ListNeurons::decode(&arg_data_raw()[..]).expect("Could not decode ListNeurons"); - let res: ListNeuronsResponse = list_neurons(request); + let request = + ListNeuronsProto::decode(&arg_data_raw()[..]).expect("Could not decode 
ListNeuronsProto"); + let candid_request = ListNeurons::from(request); + let res: ListNeuronsResponse = list_neurons(candid_request); let mut buf = Vec::with_capacity(res.encoded_len()); res.encode(&mut buf) .map_err(|e| e.to_string()) diff --git a/rs/nns/governance/canister/governance.did b/rs/nns/governance/canister/governance.did index b500b12a768..da9aaffd272 100644 --- a/rs/nns/governance/canister/governance.did +++ b/rs/nns/governance/canister/governance.did @@ -450,6 +450,11 @@ type ListNeurons = record { page_number: opt nat64; page_size: opt nat64; + neuron_subaccounts: opt vec NeuronSubaccount; +}; + +type NeuronSubaccount = record { + subaccount : blob; }; // Output of the list_neurons method. diff --git a/rs/nns/governance/canister/governance_test.did b/rs/nns/governance/canister/governance_test.did index 4a46fb58f15..db1ed760ff2 100644 --- a/rs/nns/governance/canister/governance_test.did +++ b/rs/nns/governance/canister/governance_test.did @@ -440,6 +440,11 @@ type ListNeurons = record { include_neurons_readable_by_caller : bool; page_number: opt nat64; page_size: opt nat64; + neuron_subaccounts: opt vec NeuronSubaccount; +}; + +type NeuronSubaccount = record { + subaccount : blob; }; type ListNeuronsResponse = record { diff --git a/rs/nns/governance/src/governance.rs b/rs/nns/governance/src/governance.rs index 7566da55054..f57ffe325db 100644 --- a/rs/nns/governance/src/governance.rs +++ b/rs/nns/governance/src/governance.rs @@ -2240,6 +2240,7 @@ impl Governance { include_public_neurons_in_full_neurons, page_number, page_size, + neuron_subaccounts, } = list_neurons; let page_number = page_number.unwrap_or(0); @@ -2275,10 +2276,27 @@ impl Governance { BTreeSet::new() }; + let mut neurons_by_subaccount: BTreeSet = neuron_subaccounts + .as_ref() + .map(|subaccounts| { + subaccounts + .iter() + .flat_map(|neuron_subaccount| { + Self::bytes_to_subaccount(&neuron_subaccount.subaccount) + .ok() + .and_then(|subaccount| { + 
self.neuron_store.get_neuron_id_for_subaccount(subaccount) + }) + }) + .collect() + }) + .unwrap_or_default(); + // Concatenate (explicit and implicit)-ly included neurons. let mut requested_neuron_ids: BTreeSet = neuron_ids.iter().map(|id| NeuronId { id: *id }).collect(); requested_neuron_ids.append(&mut implicitly_requested_neuron_ids); + requested_neuron_ids.append(&mut neurons_by_subaccount); // These will be assembled into the final result. let mut neuron_infos = hashmap![]; diff --git a/rs/nns/governance/src/governance/benches.rs b/rs/nns/governance/src/governance/benches.rs index 865a881d727..53aa308c6fd 100644 --- a/rs/nns/governance/src/governance/benches.rs +++ b/rs/nns/governance/src/governance/benches.rs @@ -29,6 +29,7 @@ use ic_nns_common::{ types::NeuronId, }; use ic_nns_constants::GOVERNANCE_CANISTER_ID; +use ic_nns_governance_api::pb::v1::list_neurons::NeuronSubaccount; use ic_nns_governance_api::pb::v1::ListNeurons; use icp_ledger::Subaccount; use maplit::hashmap; @@ -530,6 +531,57 @@ fn compute_ballots_for_new_proposal_with_stable_neurons() -> BenchResult { }) } +fn list_neurons_by_subaccount_benchmark() -> BenchResult { + let neurons = (0..100) + .map(|id| { + (id, { + let mut neuron: NeuronProto = make_neuron( + id, + PrincipalId::new_user_test_id(id), + 1_000_000_000, + hashmap! 
{}, // get the default followees + ) + .into(); + neuron.hot_keys = vec![PrincipalId::new_user_test_id(1)]; + neuron + }) + }) + .collect::>(); + + let subaccounts = neurons + .values() + .map(|neuron| NeuronSubaccount { + subaccount: neuron.account.clone(), + }) + .collect(); + + let governance_proto = GovernanceProto { + neurons, + ..GovernanceProto::default() + }; + + let governance = Governance::new( + governance_proto, + Box::new(MockEnvironment::new(Default::default(), 0)), + Box::new(StubIcpLedger {}), + Box::new(StubCMC {}), + ); + + let request = ListNeurons { + neuron_ids: vec![], + include_neurons_readable_by_caller: false, + include_empty_neurons_readable_by_caller: Some(false), + include_public_neurons_in_full_neurons: None, + page_number: None, + page_size: None, + neuron_subaccounts: Some(subaccounts), + }; + + bench_fn(|| { + governance.list_neurons(&request, PrincipalId::new_user_test_id(1)); + }) +} + fn list_neurons_benchmark() -> BenchResult { let neurons = (0..100) .map(|id| { @@ -565,6 +617,7 @@ fn list_neurons_benchmark() -> BenchResult { include_public_neurons_in_full_neurons: None, page_number: None, page_size: None, + neuron_subaccounts: None, }; bench_fn(|| { @@ -588,6 +641,22 @@ fn list_neurons_heap() -> BenchResult { list_neurons_benchmark() } +/// Benchmark list_neurons +#[bench(raw)] +fn list_neurons_by_subaccount_stable() -> BenchResult { + let _a = temporarily_enable_allow_active_neurons_in_stable_memory(); + let _b = temporarily_enable_migrate_active_neurons_to_stable_memory(); + list_neurons_by_subaccount_benchmark() +} + +/// Benchmark list_neurons +#[bench(raw)] +fn list_neurons_by_subaccount_heap() -> BenchResult { + let _a = temporarily_disable_allow_active_neurons_in_stable_memory(); + let _b = temporarily_disable_migrate_active_neurons_to_stable_memory(); + list_neurons_by_subaccount_benchmark() +} + fn create_service_nervous_system_action_with_large_payload() -> CreateServiceNervousSystem { let mut action = 
CREATE_SERVICE_NERVOUS_SYSTEM_WITH_MATCHED_FUNDING.clone(); diff --git a/rs/nns/governance/src/governance/tests/list_neurons.rs b/rs/nns/governance/src/governance/tests/list_neurons.rs index 46019e5ac0e..7ccb825dce1 100644 --- a/rs/nns/governance/src/governance/tests/list_neurons.rs +++ b/rs/nns/governance/src/governance/tests/list_neurons.rs @@ -5,6 +5,7 @@ use crate::{ }; use ic_base_types::PrincipalId; use ic_nns_common::pb::v1::NeuronId; +use ic_nns_governance_api::pb::v1::list_neurons::NeuronSubaccount; use ic_nns_governance_api::pb::v1::ListNeurons; #[test] @@ -50,6 +51,7 @@ fn test_list_neurons_with_paging() { include_public_neurons_in_full_neurons: None, page_number: None, page_size: None, + neuron_subaccounts: Some(vec![]), }; let response_with_no_page_number = governance.list_neurons(&request, user_id); @@ -68,6 +70,7 @@ fn test_list_neurons_with_paging() { include_public_neurons_in_full_neurons: None, page_number: Some(1), page_size: None, + neuron_subaccounts: None, }, user_id, ); @@ -84,6 +87,7 @@ fn test_list_neurons_with_paging() { include_public_neurons_in_full_neurons: None, page_number: Some(0), page_size: Some(501), + neuron_subaccounts: None, }, user_id, ); @@ -91,3 +95,82 @@ fn test_list_neurons_with_paging() { assert_eq!(response.full_neurons.len(), 500); assert_eq!(response.total_pages_available, Some(2)); } + +#[test] +fn test_list_neurons_by_subaccounts_and_ids() { + let user_id = PrincipalId::new_user_test_id(100); + + let neurons = (1..1000u64) + .map(|id| { + let dissolve_state = DissolveState::DissolveDelaySeconds(100); + let account = crate::test_utils::test_subaccount_for_neuron_id(id); + ( + id, + Neuron { + id: Some(NeuronId::from_u64(id)), + controller: Some(user_id), + account, + dissolve_state: Some(dissolve_state), + // Fill in the rest as needed (stake, maturity, etc.) 
+ ..Default::default() + }, + ) + }) + .collect(); + + let governance = Governance::new( + crate::pb::v1::Governance { + neurons, + economics: Some(NetworkEconomics { + voting_power_economics: Some(Default::default()), + ..Default::default() + }), + ..crate::pb::v1::Governance::default() + }, + Box::new(MockEnvironment::new(Default::default(), 0)), + Box::new(StubIcpLedger {}), + Box::new(StubCMC {}), + ); + + let request = ListNeurons { + neuron_ids: (1..501).collect(), + include_neurons_readable_by_caller: false, + include_empty_neurons_readable_by_caller: None, + include_public_neurons_in_full_neurons: None, + page_number: None, + page_size: None, + neuron_subaccounts: Some( + (501..1000) + .map(|id| NeuronSubaccount { + subaccount: crate::test_utils::test_subaccount_for_neuron_id(id), + }) + .collect(), + ), + }; + + let response_1 = governance.list_neurons(&request, user_id); + assert_eq!(response_1.full_neurons.len(), 500); + assert_eq!(response_1.total_pages_available, Some(2)); + + let response_2 = governance.list_neurons( + &ListNeurons { + neuron_ids: (1..501).collect(), + include_neurons_readable_by_caller: false, + include_empty_neurons_readable_by_caller: None, + include_public_neurons_in_full_neurons: None, + page_number: Some(1), + page_size: None, + neuron_subaccounts: Some( + (501..1000) + .map(|id| NeuronSubaccount { + subaccount: crate::test_utils::test_subaccount_for_neuron_id(id), + }) + .collect(), + ), + }, + user_id, + ); + + assert_eq!(response_2.full_neurons.len(), 499); + assert_eq!(response_2.total_pages_available, Some(2)); +} diff --git a/rs/nns/governance/src/lib.rs b/rs/nns/governance/src/lib.rs index 93ebe8c68d0..8795374c916 100644 --- a/rs/nns/governance/src/lib.rs +++ b/rs/nns/governance/src/lib.rs @@ -195,9 +195,9 @@ pub const DEFAULT_VOTING_POWER_REFRESHED_TIMESTAMP_SECONDS: u64 = 1725148800; // leave this here indefinitely, but it will just be clutter after a modest // amount of time. thread_local! 
{ - + // TODO(NNS1-3601): Delete these (assuming all goes well, ofc) in mid March. + // There is already a draft PR for this. static IS_VOTING_POWER_ADJUSTMENT_ENABLED: Cell = const { Cell::new(true) }; - static IS_PRUNE_FOLLOWING_ENABLED: Cell = const { Cell::new(true) }; static ALLOW_ACTIVE_NEURONS_IN_STABLE_MEMORY: Cell = const { Cell::new(true) }; diff --git a/rs/nns/governance/src/neuron_store/metrics.rs b/rs/nns/governance/src/neuron_store/metrics.rs index f7b170918c4..ade908e267c 100644 --- a/rs/nns/governance/src/neuron_store/metrics.rs +++ b/rs/nns/governance/src/neuron_store/metrics.rs @@ -111,19 +111,23 @@ impl NeuronMetrics { now_seconds: u64, neuron: &Neuron, ) { + // The subtraction here assumes that the neuron was not refreshed in + // the future. (This doesn't always hold in tests though, due to the + // difficulty of constructing realistic data/scenarios.) let seconds_since_voting_power_refreshed = - // Here, we assume that the neuron was not refreshed in the future. - // This doesn't always hold in tests though, due to the difficulty - // of constructing realistic data/scenarios.
now_seconds.saturating_sub(neuron.voting_power_refreshed_timestamp_seconds()); - let Some(seconds_losing_voting_power) = seconds_since_voting_power_refreshed - .checked_sub(voting_power_economics.get_start_reducing_voting_power_after_seconds()) - else { + + let is_recently_refreshed = seconds_since_voting_power_refreshed + < voting_power_economics.get_start_reducing_voting_power_after_seconds(); + if is_recently_refreshed { return; - }; + } - if seconds_losing_voting_power < voting_power_economics.get_clear_following_after_seconds() - { + let is_moderately_refreshed = seconds_since_voting_power_refreshed + < voting_power_economics + .get_start_reducing_voting_power_after_seconds() + .saturating_add(voting_power_economics.get_clear_following_after_seconds()); + if is_moderately_refreshed { self.declining_voting_power_neuron_subset_metrics.increment( voting_power_economics, now_seconds, diff --git a/rs/nns/governance/tests/governance.rs b/rs/nns/governance/tests/governance.rs index c31d9f45519..9eb3fbb310d 100644 --- a/rs/nns/governance/tests/governance.rs +++ b/rs/nns/governance/tests/governance.rs @@ -10610,6 +10610,7 @@ fn test_include_public_neurons_in_full_neurons() { page_number: None, page_size: None, + neuron_subaccounts: None, }, caller, ); @@ -14674,6 +14675,7 @@ fn test_neuron_info_private_enforcement() { include_public_neurons_in_full_neurons: None, page_number: None, page_size: None, + neuron_subaccounts: None, }, requester, ) diff --git a/rs/nns/governance/unreleased_changelog.md b/rs/nns/governance/unreleased_changelog.md index 99b6a240520..31ccbc9f14c 100644 --- a/rs/nns/governance/unreleased_changelog.md +++ b/rs/nns/governance/unreleased_changelog.md @@ -9,33 +9,16 @@ on the process that this file is part of, see ## Added -### List Neurons Paging +### List Neurons API Change: Query by Subaccount -Two new fields are added to the request, and one to the response. +The `list_neurons` API now supports querying by neuron subaccount. 
This is useful for neuron holders who +have many neurons and want to list only the neurons associated with a particular subaccount. -The request now supports `page_size` and `page_number`. If `page_size` is greater than -`MAX_LIST_NEURONS_RESULTS` (currently 500), the API will treat it as `MAX_LIST_NEURONS_RESULTS`, and -continue procesisng the request. If `page_number` is None, the API will treat it as Some(0) +A new field `neuron_subaccounts` is added to the request, which is a list of subaccounts to query +for. If this field is present, any neurons found will be added to the response. If duplicate +neurons are found between this field and others, they will be deduplicated before returning the value. -In the response, a field `total_pages_available` is available to tell the user how many -additional requests need to be made. - -This will only affect neuron holders with more than 500 neurons, which is a small minority. - -This allows neuron holders with many neurons to list all of their neurons, whereas before, -responses could be too large to be sent by the protocol. - -### Migrating Active Neurons to Stable Memory - -In this relesae, we turn on 2 features related to migrating active neurons to stable memory: - -1. `allow_active_neurons_in_stable_memory`: this allows the canister to look for active neurons in - stable memory, while previously the canister always assumes active neurons are always in the heap. - -2. `use_stable_memory_following_index`: this lets the canister use the neuron following index in the - stable memory, instead of the one in the heap. - -No neurons are actually migrated yet. +This new field works in the same way that the existing `neuron_ids` field works. 
## Changed diff --git a/rs/nns/integration_tests/src/governance_neurons.rs b/rs/nns/integration_tests/src/governance_neurons.rs index 11c29e68626..0af2990e8c6 100644 --- a/rs/nns/integration_tests/src/governance_neurons.rs +++ b/rs/nns/integration_tests/src/governance_neurons.rs @@ -5,6 +5,7 @@ use dfn_candid::candid_one; use dfn_protobuf::protobuf; use ic_base_types::PrincipalId; use ic_canister_client_sender::Sender; +use ic_nervous_system_common::ledger::compute_neuron_staking_subaccount_bytes; use ic_nervous_system_common_test_keys::{ TEST_NEURON_1_ID, TEST_NEURON_1_OWNER_KEYPAIR, TEST_NEURON_1_OWNER_PRINCIPAL, TEST_NEURON_2_ID, TEST_NEURON_2_OWNER_PRINCIPAL, @@ -13,6 +14,7 @@ use ic_nns_common::pb::v1::NeuronId as NeuronIdProto; use ic_nns_governance::governance::INITIAL_NEURON_DISSOLVE_DELAY; use ic_nns_governance_api::pb::v1::{ governance_error::ErrorType, + list_neurons::NeuronSubaccount, manage_neuron::{Command, Merge, NeuronIdOrSubaccount, Spawn}, manage_neuron_response::{ Command as CommandResponse, {self}, @@ -553,6 +555,7 @@ fn test_list_neurons() { include_public_neurons_in_full_neurons: None, page_number: None, page_size: None, + neuron_subaccounts: None, }, ); assert_eq!(list_neurons_response.neuron_infos.len(), 3); @@ -569,6 +572,7 @@ fn test_list_neurons() { include_public_neurons_in_full_neurons: None, page_number: None, page_size: None, + neuron_subaccounts: None, }, ); assert_eq!(list_neurons_response.neuron_infos.len(), 2); @@ -585,6 +589,7 @@ fn test_list_neurons() { include_public_neurons_in_full_neurons: None, page_number: None, page_size: None, + neuron_subaccounts: Some(vec![]), // Should be equivalent to None }, ); assert_eq!(list_neurons_response.neuron_infos.len(), 1); @@ -598,12 +603,35 @@ fn test_list_neurons() { ListNeurons { neuron_ids: vec![neuron_id_3.id], include_neurons_readable_by_caller: true, - include_empty_neurons_readable_by_caller: None, + include_empty_neurons_readable_by_caller: Some(true), 
include_public_neurons_in_full_neurons: None, page_number: None, page_size: None, + neuron_subaccounts: Some(vec![]), }, ); - assert_eq!(list_neurons_response.neuron_infos.len(), 2); - assert_eq!(list_neurons_response.full_neurons.len(), 1); + assert_eq!(list_neurons_response.neuron_infos.len(), 3); + assert_eq!(list_neurons_response.full_neurons.len(), 2); + + // Step 6: Same as Step 5, but specify neuron 3 by its subaccount instead of + // by its neuron id. The caller still does not control neuron 3. + + let subaccount = compute_neuron_staking_subaccount_bytes(principal_2, 3); + let list_neurons_response = list_neurons( + &state_machine, + principal_1, + ListNeurons { + neuron_ids: vec![], + include_neurons_readable_by_caller: true, + include_empty_neurons_readable_by_caller: Some(true), + include_public_neurons_in_full_neurons: None, + page_number: None, + page_size: None, + neuron_subaccounts: Some(vec![NeuronSubaccount { + subaccount: subaccount.to_vec(), + }]), + }, + ); + assert_eq!(list_neurons_response.neuron_infos.len(), 3); + assert_eq!(list_neurons_response.full_neurons.len(), 2); } diff --git a/rs/nns/integration_tests/src/neuron_voting.rs b/rs/nns/integration_tests/src/neuron_voting.rs index 4d00f02180a..eba4c85cc81 100644 --- a/rs/nns/integration_tests/src/neuron_voting.rs +++ b/rs/nns/integration_tests/src/neuron_voting.rs @@ -402,6 +402,7 @@ fn test_voting_can_span_multiple_rounds() { include_public_neurons_in_full_neurons: None, page_number: None, page_size: None, + neuron_subaccounts: Some(vec![]), }, ); @@ -429,6 +430,7 @@ fn test_voting_can_span_multiple_rounds() { include_public_neurons_in_full_neurons: None, page_number: None, page_size: None, + neuron_subaccounts: Some(vec![]), }, ); diff --git a/rs/nns/sns-wasm/unreleased_changelog.md b/rs/nns/sns-wasm/unreleased_changelog.md index 94126a0ff42..ebb1c72b5d6 100644 --- a/rs/nns/sns-wasm/unreleased_changelog.md +++ b/rs/nns/sns-wasm/unreleased_changelog.md @@ -15,6 +15,10 @@ on the process that this file
is part of, see ## Removed +* Logos are no longer included into *serialized* initial SNS initialization parameters for newly + deployed SNSs. They are, of course, still included in the metadata responses: + `SnsGov.get_metadata` and `SnsLedger.icrc1_metadata`. + ## Fixed ## Security diff --git a/rs/nns/test_utils/src/state_test_helpers.rs b/rs/nns/test_utils/src/state_test_helpers.rs index 91fb87a37f7..819e11e898c 100644 --- a/rs/nns/test_utils/src/state_test_helpers.rs +++ b/rs/nns/test_utils/src/state_test_helpers.rs @@ -1619,6 +1619,7 @@ pub fn list_neurons_by_principal( include_public_neurons_in_full_neurons: None, page_number: None, page_size: None, + neuron_subaccounts: None, }, ) } diff --git a/rs/protobuf/src/registry/subnet.rs b/rs/protobuf/src/registry/subnet.rs index df287edf5ee..3d8d64bb5e8 100644 --- a/rs/protobuf/src/registry/subnet.rs +++ b/rs/protobuf/src/registry/subnet.rs @@ -1,48 +1,3 @@ #[allow(clippy::all)] #[path = "../gen/registry/registry.subnet.v1.rs"] pub mod v1; - -use crate::registry::subnet::v1::{ - ChainKeyConfig as ChainKeyConfigPb, EcdsaConfig as EcdsaConfigPb, KeyConfig as KeyConfigPb, -}; -use crate::types::v1::{master_public_key_id, MasterPublicKeyId as MasterPublicKeyIdPb}; - -/// This code is part of the data migration from `EcdsaConfig` to `ChainKeyConfig`. -/// -/// Use this implementation to retrofit the values from an existing `EcdsaConfig` instance in places -/// where we now need a `ChainKeyConfig` instance. -/// -/// TODO[NNS1-2986]: Remove this code. 
-impl From for ChainKeyConfigPb { - fn from(src: EcdsaConfigPb) -> Self { - let EcdsaConfigPb { - key_ids, - quadruples_to_create_in_advance, - max_queue_size, - signature_request_timeout_ns, - idkg_key_rotation_period_ms, - } = src; - - let pre_signatures_to_create_in_advance = Some(quadruples_to_create_in_advance); - let max_queue_size = Some(max_queue_size); - - let key_configs = key_ids - .into_iter() - .map(|key_id| { - let key_id = Some(master_public_key_id::KeyId::Ecdsa(key_id)); - let key_id = Some(MasterPublicKeyIdPb { key_id }); - KeyConfigPb { - key_id, - pre_signatures_to_create_in_advance, - max_queue_size, - } - }) - .collect(); - - Self { - key_configs, - signature_request_timeout_ns, - idkg_key_rotation_period_ms, - } - } -} diff --git a/rs/registry/canister/src/mutations/do_create_subnet.rs b/rs/registry/canister/src/mutations/do_create_subnet.rs index 6d6433aec0c..be4900b68a9 100644 --- a/rs/registry/canister/src/mutations/do_create_subnet.rs +++ b/rs/registry/canister/src/mutations/do_create_subnet.rs @@ -11,7 +11,7 @@ use ic_management_canister_types::{ use ic_protobuf::registry::{ node::v1::NodeRecord, subnet::v1::{ - CatchUpPackageContents, ChainKeyConfig as ChainKeyConfigPb, EcdsaConfig as EcdsaConfigPb, + CatchUpPackageContents, ChainKeyConfig as ChainKeyConfigPb, SubnetFeatures as SubnetFeaturesPb, SubnetRecord, }, }; @@ -19,9 +19,7 @@ use ic_registry_keys::{ make_catch_up_package_contents_key, make_crypto_threshold_signing_pubkey_key, make_node_record_key, make_subnet_list_record_key, make_subnet_record_key, }; -use ic_registry_subnet_features::{ - EcdsaConfig, KeyConfig as KeyConfigInternal, SubnetFeatures, DEFAULT_ECDSA_MAX_QUEUE_SIZE, -}; +use ic_registry_subnet_features::{KeyConfig as KeyConfigInternal, SubnetFeatures}; use ic_registry_subnet_type::SubnetType; use ic_registry_transport::pb::v1::{registry_mutation, RegistryMutation, RegistryValue}; use on_wire::bytes; @@ -84,17 +82,7 @@ impl Registry { // 2b. 
Invoke compute_initial_i_dkg_dealings on ic_00 - // TODO[NNS1-3022]: Stop reading `payload.ecdsa_config` and mutating `payload`. - - // Legacy ECDSA data is used only if there is nothing in `payload.chain_key_config`. - // Even if legacy ECDSA data is used, it is converted to `InitialChainKeyConfig` here. - let initial_chain_key_config_from_legacy_source = - payload.ecdsa_config.clone().map(|ecdsa_initial_config| { - InitialChainKeyConfigInternal::try_from(ecdsa_initial_config) - .expect("Invalid EcdsaInitialConfig") - }); - - let initial_chain_key_config_from_new_source = + let initial_chain_key_config = payload .chain_key_config .clone() @@ -103,17 +91,12 @@ impl Registry { .expect("Invalid InitialChainKeyConfig") }); - let initial_chain_key_config = initial_chain_key_config_from_new_source - .or(initial_chain_key_config_from_legacy_source); - let receiver_nodes = payload.node_ids.clone(); let chain_key_initializations = self .get_all_initial_i_dkg_dealings_from_ic00(&initial_chain_key_config, receiver_nodes) .await; - // `payload` needs to be canonicalized, ensuring `ecdsa_config: None`. let payload = CreateSubnetPayload { - ecdsa_config: None, chain_key_config: initial_chain_key_config.map(InitialChainKeyConfig::from), ..payload }; @@ -190,13 +173,17 @@ impl Registry { } /// Validates runtime payload values that aren't checked by invariants. + /// Ensures that the obsolete ECDSA keys are not specified. /// Ensures all nodes for new subnet a) exist and b) are not in another subnet. /// Ensure all nodes for new subnet are not already assigned as ApiBoundaryNode. - /// Ensures that ECDSA keys are not specified using both the (deprecated) `ecdsa_config` and - /// the new `chain_key_config` fields. /// Ensures that a valid `subnet_id` is specified for `KeyConfigRequest`s. /// Ensures that master public keys (a) exist and (b) are present on the requested subnet. 
fn validate_create_subnet_payload(&self, payload: &CreateSubnetPayload) { + assert_eq!( + payload.ecdsa_config, None, + "Field ecdsa_config is deprecated. Please use chain_key_config instead.", + ); + // Verify that all Nodes exist payload.node_ids.iter().for_each(|node_id| { match self.get( @@ -246,42 +233,22 @@ impl Registry { } }); + let Some(initial_chain_key_config) = &payload.chain_key_config else { + return; // Nothing to do. + }; + let prevalidated_initial_chain_key_config = - match (&payload.ecdsa_config, &payload.chain_key_config) { - (Some(_), Some(_)) => { - panic!( - "Deprecated field ecdsa_config cannot be specified with chain_key_config." - ); - } - (Some(ecdsa_initial_config), None) => { - let initial_chain_key_config_from_legacy_source = - InitialChainKeyConfigInternal::try_from(ecdsa_initial_config.clone()) - .unwrap_or_else(|err| { - panic!( - "{}Cannot prevalidate ChainKeyConfig derived from EcdsaInitialConfig: \ - {}", LOG_PREFIX, err - ); - }); - Some(initial_chain_key_config_from_legacy_source) - } - (None, Some(initial_chain_key_config)) => { - let initial_chain_key_config_from_new_source = - InitialChainKeyConfigInternal::try_from(initial_chain_key_config.clone()) - .unwrap_or_else(|err| { - panic!("{}Cannot prevalidate ChainKeyConfig: {}", LOG_PREFIX, err); - }); - Some(initial_chain_key_config_from_new_source) - } - (None, None) => None, - }; - if let Some(prevalidated_initial_chain_key_config) = prevalidated_initial_chain_key_config { - let own_subnet_id = None; - self.validate_initial_chain_key_config( - &prevalidated_initial_chain_key_config, - own_subnet_id, - ) - .unwrap_or_else(|err| panic!("{}Cannot validate ChainKeyConfig: {}", LOG_PREFIX, err)); - } + InitialChainKeyConfigInternal::try_from(initial_chain_key_config.clone()) + .unwrap_or_else(|err| { + panic!("{}Cannot prevalidate ChainKeyConfig: {}", LOG_PREFIX, err); + }); + + let own_subnet_id = None; + self.validate_initial_chain_key_config( + 
&prevalidated_initial_chain_key_config, + own_subnet_id, + ) + .unwrap_or_else(|err| panic!("{}Cannot validate ChainKeyConfig: {}", LOG_PREFIX, err)); } } @@ -318,9 +285,7 @@ pub struct CreateSubnetPayload { pub ssh_readonly_access: Vec, pub ssh_backup_access: Vec, - // Deprecated. Please use `chain_key_config` instead. - // - // TODO[NNS1-3022]: Make this field obsolete. + // Obsolete. Please use `chain_key_config` instead. pub ecdsa_config: Option, pub chain_key_config: Option, @@ -507,56 +472,6 @@ impl TryFrom for KeyConfigRequestInternal { } } -// TODO[NNS1-3022]: Remove this code. -impl TryFrom for InitialChainKeyConfigInternal { - type Error = String; - - fn try_from(src: EcdsaInitialConfig) -> Result { - let EcdsaInitialConfig { - quadruples_to_create_in_advance, - keys, - max_queue_size, - signature_request_timeout_ns, - idkg_key_rotation_period_ms, - } = src; - let pre_signatures_to_create_in_advance = quadruples_to_create_in_advance; - let max_queue_size = max_queue_size.unwrap_or(DEFAULT_ECDSA_MAX_QUEUE_SIZE); - - let mut errors = vec![]; - let key_configs = keys - .into_iter() - .filter_map(|EcdsaKeyRequest { key_id, subnet_id }| { - let Some(subnet_id) = subnet_id else { - errors.push(format!( - "EcdsaKeyRequest.subnet_id must be set (.key_id = {:?})", - key_id - )); - return None; - }; - Some(KeyConfigRequestInternal { - key_config: KeyConfigInternal { - key_id: MasterPublicKeyId::Ecdsa(key_id), - pre_signatures_to_create_in_advance, - max_queue_size, - }, - subnet_id, - }) - }) - .collect(); - - if !errors.is_empty() { - let errors = errors.join(", "); - return Err(format!("Invalid EcdsaInitialConfig: {}", errors)); - } - - Ok(Self { - key_configs, - signature_request_timeout_ns, - idkg_key_rotation_period_ms, - }) - } -} - #[derive(Clone, Eq, PartialEq, Debug, Default, CandidType, Deserialize, Serialize)] pub struct EcdsaInitialConfig { pub quadruples_to_create_in_advance: u32, @@ -573,38 +488,6 @@ pub struct EcdsaKeyRequest { pub subnet_id: Option, 
} -impl From for EcdsaConfigPb { - fn from(val: EcdsaInitialConfig) -> Self { - Self { - quadruples_to_create_in_advance: val.quadruples_to_create_in_advance, - key_ids: val - .keys - .iter() - .map(|val| (&val.key_id).into()) - .collect::>(), - max_queue_size: val.max_queue_size.unwrap_or(DEFAULT_ECDSA_MAX_QUEUE_SIZE), - signature_request_timeout_ns: val.signature_request_timeout_ns, - idkg_key_rotation_period_ms: val.idkg_key_rotation_period_ms, - } - } -} - -impl From for EcdsaConfig { - fn from(val: EcdsaInitialConfig) -> Self { - Self { - quadruples_to_create_in_advance: val.quadruples_to_create_in_advance, - key_ids: val - .keys - .iter() - .map(|val| val.key_id.clone()) - .collect::>(), - max_queue_size: val.max_queue_size, - signature_request_timeout_ns: val.signature_request_timeout_ns, - idkg_key_rotation_period_ms: val.idkg_key_rotation_period_ms, - } - } -} - impl From for SubnetRecord { fn from(val: CreateSubnetPayload) -> Self { SubnetRecord { @@ -655,7 +538,7 @@ mod test { }; use ic_management_canister_types::EcdsaCurve; use ic_nervous_system_common_test_keys::{TEST_USER1_PRINCIPAL, TEST_USER2_PRINCIPAL}; - use ic_registry_subnet_features::ChainKeyConfig; + use ic_registry_subnet_features::{ChainKeyConfig, DEFAULT_ECDSA_MAX_QUEUE_SIZE}; use ic_types::ReplicaVersion; // Note: this can only be unit-tested b/c it fails before we hit inter-canister calls @@ -668,16 +551,18 @@ mod test { let mut registry = invariant_compliant_registry(0); let payload = CreateSubnetPayload { replica_version_id: ReplicaVersion::default().into(), - ecdsa_config: Some(EcdsaInitialConfig { - quadruples_to_create_in_advance: 1, - keys: vec![EcdsaKeyRequest { - key_id: EcdsaKeyId { - curve: EcdsaCurve::Secp256k1, - name: "fake_key_id".to_string(), - }, + chain_key_config: Some(InitialChainKeyConfig { + key_configs: vec![KeyConfigRequest { + key_config: Some(KeyConfig { + key_id: Some(MasterPublicKeyId::Ecdsa(EcdsaKeyId { + curve: EcdsaCurve::Secp256k1, + name: 
"fake_key_id".to_string(), + })), + pre_signatures_to_create_in_advance: Some(1), + max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), + }), subnet_id: Some(*TEST_USER2_PRINCIPAL), }], - max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), signature_request_timeout_ns: None, idkg_key_rotation_period_ms: None, }), @@ -688,7 +573,7 @@ mod test { } #[test] - #[should_panic(expected = "EcdsaKeyRequest.subnet_id must be set")] + #[should_panic(expected = "KeyConfigRequest.subnet_id must be specified")] fn should_panic_if_ecdsa_keys_subnet_not_specified() { // Set up a subnet that has the key but fail to specify subnet_id in request let key_id = EcdsaKeyId { @@ -706,14 +591,16 @@ mod test { let mut subnet_record: SubnetRecord = get_invariant_compliant_subnet_record(node_ids_and_dkg_pks.keys().copied().collect()); - let ecdsa_config = EcdsaConfig { - quadruples_to_create_in_advance: 1, - key_ids: vec![key_id.clone()], - max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), + + let chain_key_config = ChainKeyConfig { + key_configs: vec![KeyConfigInternal { + key_id: MasterPublicKeyId::Ecdsa(key_id.clone()), + pre_signatures_to_create_in_advance: 1, + max_queue_size: DEFAULT_ECDSA_MAX_QUEUE_SIZE, + }], signature_request_timeout_ns: None, idkg_key_rotation_period_ms: None, }; - let chain_key_config = ChainKeyConfig::from(ecdsa_config); subnet_record.chain_key_config = Some(ChainKeyConfigPb::from(chain_key_config)); let fake_subnet_mutation = add_fake_subnet( @@ -727,13 +614,15 @@ mod test { // Make a request for the key from a subnet that does not have the key let payload = CreateSubnetPayload { replica_version_id: ReplicaVersion::default().into(), - ecdsa_config: Some(EcdsaInitialConfig { - quadruples_to_create_in_advance: 1, - keys: vec![EcdsaKeyRequest { - key_id, + chain_key_config: Some(InitialChainKeyConfig { + key_configs: vec![KeyConfigRequest { + key_config: Some(KeyConfig { + key_id: Some(MasterPublicKeyId::Ecdsa(key_id)), + pre_signatures_to_create_in_advance: 
Some(1), + max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), + }), subnet_id: None, }], - max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), signature_request_timeout_ns: None, idkg_key_rotation_period_ms: None, }), @@ -764,14 +653,16 @@ mod test { let mut subnet_record: SubnetRecord = get_invariant_compliant_subnet_record(node_ids_and_dkg_pks.keys().copied().collect()); - let ecdsa_config = EcdsaConfig { - quadruples_to_create_in_advance: 1, - key_ids: vec![key_id.clone()], - max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), + + let chain_key_config = ChainKeyConfig { + key_configs: vec![KeyConfigInternal { + key_id: MasterPublicKeyId::Ecdsa(key_id.clone()), + pre_signatures_to_create_in_advance: 1, + max_queue_size: DEFAULT_ECDSA_MAX_QUEUE_SIZE, + }], signature_request_timeout_ns: None, idkg_key_rotation_period_ms: None, }; - let chain_key_config = ChainKeyConfig::from(ecdsa_config); subnet_record.chain_key_config = Some(ChainKeyConfigPb::from(chain_key_config)); let fake_subnet_mutation = add_fake_subnet( @@ -785,13 +676,15 @@ mod test { // Make a request for the key from a subnet that does not have the key let payload = CreateSubnetPayload { replica_version_id: ReplicaVersion::default().into(), - ecdsa_config: Some(EcdsaInitialConfig { - quadruples_to_create_in_advance: 1, - keys: vec![EcdsaKeyRequest { - key_id, + chain_key_config: Some(InitialChainKeyConfig { + key_configs: vec![KeyConfigRequest { + key_config: Some(KeyConfig { + key_id: Some(MasterPublicKeyId::Ecdsa(key_id)), + pre_signatures_to_create_in_advance: Some(1), + max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), + }), subnet_id: Some(*TEST_USER2_PRINCIPAL), }], - max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), signature_request_timeout_ns: None, idkg_key_rotation_period_ms: None, }), @@ -820,14 +713,17 @@ mod test { let mut subnet_list_record = registry.get_subnet_list_record(); let mut subnet_record: SubnetRecord = 
get_invariant_compliant_subnet_record(node_ids_and_dkg_pks.keys().copied().collect()); - let ecdsa_config = EcdsaConfig { - quadruples_to_create_in_advance: 1, - key_ids: vec![key_id.clone()], - max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), + + let chain_key_config = ChainKeyConfig { + key_configs: vec![KeyConfigInternal { + key_id: MasterPublicKeyId::Ecdsa(key_id.clone()), + pre_signatures_to_create_in_advance: 1, + max_queue_size: DEFAULT_ECDSA_MAX_QUEUE_SIZE, + }], signature_request_timeout_ns: None, idkg_key_rotation_period_ms: None, }; - let chain_key_config = ChainKeyConfig::from(ecdsa_config); + let chain_key_config_pb = ChainKeyConfigPb::from(chain_key_config); subnet_record.chain_key_config = Some(chain_key_config_pb); @@ -840,63 +736,23 @@ mod test { registry.maybe_apply_mutation_internal(fake_subnet_mutation); // Step 2: Try to create another subnet with duplicate keys, which should panic. - let key_request = EcdsaKeyRequest { - key_id, - subnet_id: Some(*TEST_USER1_PRINCIPAL), - }; - let payload = CreateSubnetPayload { - replica_version_id: ReplicaVersion::default().into(), - ecdsa_config: Some(EcdsaInitialConfig { - quadruples_to_create_in_advance: 1, - keys: vec![key_request; 2], + let key_config_request = KeyConfigRequest { + key_config: Some(KeyConfig { + key_id: Some(MasterPublicKeyId::Ecdsa(key_id)), + pre_signatures_to_create_in_advance: Some(1), max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), - signature_request_timeout_ns: None, - idkg_key_rotation_period_ms: None, }), - ..Default::default() - }; - futures::executor::block_on(registry.do_create_subnet(payload)); - } - - // TODO[NNS1-3022]: Replace this test with one that checks that `subnet_record.ecdsa_config` - // TODO[NNS1-3022]: cannot be set. - #[test] - #[should_panic( - expected = "Deprecated field ecdsa_config cannot be specified with chain_key_config." 
- )] - fn test_disallow_legacy_and_chain_key_ecdsa_config_specification_together() { - let key_id = EcdsaKeyId { - curve: EcdsaCurve::Secp256k1, - name: "fake_key_id".to_string(), + subnet_id: Some(*TEST_USER1_PRINCIPAL), }; - let mut registry = invariant_compliant_registry(0); - - // Make a request for the key from a subnet that does not have the key. let payload = CreateSubnetPayload { replica_version_id: ReplicaVersion::default().into(), - ecdsa_config: Some(EcdsaInitialConfig { - quadruples_to_create_in_advance: 1, - keys: vec![EcdsaKeyRequest { - key_id: key_id.clone(), - subnet_id: Some(*TEST_USER2_PRINCIPAL), - }], - max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), - ..Default::default() - }), chain_key_config: Some(InitialChainKeyConfig { - key_configs: vec![KeyConfigRequest { - key_config: Some(KeyConfig { - key_id: Some(MasterPublicKeyId::Ecdsa(key_id)), - pre_signatures_to_create_in_advance: Some(1), - max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), - }), - subnet_id: Some(*TEST_USER2_PRINCIPAL), - }], - ..Default::default() + key_configs: vec![key_config_request; 2], + signature_request_timeout_ns: None, + idkg_key_rotation_period_ms: None, }), ..Default::default() }; - futures::executor::block_on(registry.do_create_subnet(payload)); } } diff --git a/rs/registry/canister/src/mutations/do_recover_subnet.rs b/rs/registry/canister/src/mutations/do_recover_subnet.rs index cd455590496..94bcdb48e97 100644 --- a/rs/registry/canister/src/mutations/do_recover_subnet.rs +++ b/rs/registry/canister/src/mutations/do_recover_subnet.rs @@ -107,17 +107,7 @@ impl Registry { .await .unwrap(); - // TODO[NNS1-3022]: Stop reading `payload.ecdsa_config` and mutating `payload`. - - // Legacy ECDSA data is used only if there is nothing in `payload.chain_key_config`. - // Even if legacy ECDSA data is used, it is converted to `InitialChainKeyConfig` here. 
- let initial_chain_key_config_from_legacy_source = - payload.ecdsa_config.clone().map(|ecdsa_initial_config| { - InitialChainKeyConfigInternal::try_from(ecdsa_initial_config) - .expect("Invalid EcdsaInitialConfig") - }); - - let initial_chain_key_config_from_new_source = + let initial_chain_key_config = payload .chain_key_config .clone() @@ -126,9 +116,6 @@ impl Registry { .expect("Invalid InitialChainKeyConfig") }); - let initial_chain_key_config = initial_chain_key_config_from_new_source - .or(initial_chain_key_config_from_legacy_source); - let chain_key_initializations = self .get_all_initial_i_dkg_dealings_from_ic00(&initial_chain_key_config, dkg_nodes) .await; @@ -238,36 +225,24 @@ impl Registry { /// This is similar to validation in do_create_subnet except for constraints to avoid requesting /// keys from the subnet. fn validate_recover_subnet_payload(&self, payload: &RecoverSubnetPayload) { - let initial_chain_key_config = match (&payload.ecdsa_config, &payload.chain_key_config) { - (Some(_), Some(_)) => { - panic!( - "{}Deprecated field ecdsa_config cannot be specified with chain_key_config.", - LOG_PREFIX - ); - } - (Some(ecdsa_initial_config), None) => { - InitialChainKeyConfigInternal::try_from(ecdsa_initial_config.clone()) - .unwrap_or_else(|err| { - panic!( - "{}Invalid RecoverSubnetPayload.ecdsa_config: {}", - LOG_PREFIX, err - ); - }) - } - (None, Some(initial_chain_key_config)) => { - InitialChainKeyConfigInternal::try_from(initial_chain_key_config.clone()) - .unwrap_or_else(|err| { - panic!( - "{}Invalid RecoverSubnetPayload.chain_key_config: {}", - LOG_PREFIX, err - ); - }) - } - (None, None) => { - return; // Nothing else to do. - } + assert_eq!( + payload.ecdsa_config, None, + "Field ecdsa_config is deprecated. Please use chain_key_config instead.", + ); + + let Some(initial_chain_key_config) = &payload.chain_key_config else { + return; // Nothing to do. 
}; + let initial_chain_key_config = + InitialChainKeyConfigInternal::try_from(initial_chain_key_config.clone()) + .unwrap_or_else(|err| { + panic!( + "{}Invalid RecoverSubnetPayload.chain_key_config: {}", + LOG_PREFIX, err + ); + }); + let own_subnet_id = Some(payload.subnet_id); self.validate_initial_chain_key_config(&initial_chain_key_config, own_subnet_id) .unwrap_or_else(|err| { @@ -296,9 +271,7 @@ pub struct RecoverSubnetPayload { /// downloaded pub registry_store_uri: Option<(String, String, u64)>, - /// Deprecated. Please use `chain_key_config` instead. - /// - /// TODO[NNS1-3022]: Make this field obsolete. + /// Obsolete. Please use `chain_key_config` instead. pub ecdsa_config: Option, /// Chain key configuration must be specified if keys will be recovered to this subnet. @@ -519,16 +492,15 @@ mod test { add_fake_subnet, get_invariant_compliant_subnet_record, invariant_compliant_registry, prepare_registry_with_nodes, }, - mutations::{ - do_create_subnet::{EcdsaInitialConfig, EcdsaKeyRequest}, - do_recover_subnet::{panic_if_record_changed_across_versions, RecoverSubnetPayload}, + mutations::do_recover_subnet::{ + panic_if_record_changed_across_versions, RecoverSubnetPayload, }, registry::Registry, }; use ic_base_types::SubnetId; use ic_management_canister_types::{EcdsaCurve, EcdsaKeyId}; use ic_protobuf::registry::subnet::v1::{ChainKeyConfig as ChainKeyConfigPb, SubnetRecord}; - use ic_registry_subnet_features::{ChainKeyConfig, EcdsaConfig, DEFAULT_ECDSA_MAX_QUEUE_SIZE}; + use ic_registry_subnet_features::{ChainKeyConfig, DEFAULT_ECDSA_MAX_QUEUE_SIZE}; use ic_registry_transport::{delete, upsert}; use ic_test_utilities_types::ids::subnet_test_id; @@ -557,14 +529,17 @@ mod test { let mut subnet_record: SubnetRecord = get_invariant_compliant_subnet_record(node_ids_and_dkg_pks.keys().copied().collect()); - let ecdsa_config = EcdsaConfig { - quadruples_to_create_in_advance: 1, - key_ids: vec![key_id.clone()], - max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), 
+ + let chain_key_config = ChainKeyConfig { + key_configs: vec![KeyConfigInternal { + key_id: MasterPublicKeyId::Ecdsa(key_id.clone()), + pre_signatures_to_create_in_advance: 1, + max_queue_size: DEFAULT_ECDSA_MAX_QUEUE_SIZE, + }], signature_request_timeout_ns: None, idkg_key_rotation_period_ms: None, }; - let chain_key_config = ChainKeyConfig::from(ecdsa_config); + let chain_key_config_pb = ChainKeyConfigPb::from(chain_key_config); subnet_record.chain_key_config = Some(chain_key_config_pb); @@ -665,22 +640,24 @@ mod test { expected = "Cannot recover subnet 'ge6io-epiam-aaaaa-aaaap-yai': The requested \ chain key 'ecdsa:Secp256k1:test_key_id' was not found in any subnet." )] - fn do_recover_subnet_should_panic_if_ecdsa_keys_non_existing() { + fn do_recover_subnet_should_panic_if_chain_keys_non_existing() { let mut registry = invariant_compliant_registry(0); let subnet_id = subnet_test_id(1000); let mut payload = get_default_recover_subnet_payload(subnet_id); - payload.ecdsa_config = Some(EcdsaInitialConfig { - quadruples_to_create_in_advance: 1, - keys: vec![EcdsaKeyRequest { - key_id: EcdsaKeyId { - curve: EcdsaCurve::Secp256k1, - name: "test_key_id".to_string(), - }, + payload.chain_key_config = Some(InitialChainKeyConfig { + key_configs: vec![KeyConfigRequest { + key_config: Some(KeyConfig { + key_id: Some(MasterPublicKeyId::Ecdsa(EcdsaKeyId { + curve: EcdsaCurve::Secp256k1, + name: "test_key_id".to_string(), + })), + pre_signatures_to_create_in_advance: Some(1), + max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), + }), subnet_id: Some(subnet_id.get()), }], - max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), signature_request_timeout_ns: None, idkg_key_rotation_period_ms: None, }); @@ -689,12 +666,9 @@ mod test { } #[test] - #[should_panic( - expected = "Invalid RecoverSubnetPayload.ecdsa_config: Invalid EcdsaInitialConfig: \ - EcdsaKeyRequest.subnet_id must be set (.key_id = EcdsaKeyId { curve: Secp256k1, \ - name: \"test_key_id\" })" - )] - fn 
do_recover_subnet_should_panic_if_ecdsa_keys_subnet_not_specified() { + #[should_panic(expected = "Invalid RecoverSubnetPayload.chain_key_config: \ + Invalid InitialChainKeyConfig.key_configs: KeyConfigRequest.subnet_id must be specified.")] + fn do_recover_subnet_should_panic_if_chain_keys_subnet_not_specified() { let key_id = EcdsaKeyId { curve: EcdsaCurve::Secp256k1, name: "test_key_id".to_string(), @@ -707,13 +681,16 @@ mod test { // Make a request for the key from a subnet that does not have the key let mut payload = get_default_recover_subnet_payload(subnet_id_to_recover); - payload.ecdsa_config = Some(EcdsaInitialConfig { - quadruples_to_create_in_advance: 1, - keys: vec![EcdsaKeyRequest { - key_id, + + payload.chain_key_config = Some(InitialChainKeyConfig { + key_configs: vec![KeyConfigRequest { + key_config: Some(KeyConfig { + key_id: Some(MasterPublicKeyId::Ecdsa(key_id)), + pre_signatures_to_create_in_advance: Some(1), + max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), + }), subnet_id: None, }], - max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), signature_request_timeout_ns: None, idkg_key_rotation_period_ms: None, }); @@ -727,7 +704,7 @@ mod test { 'ecdsa:Secp256k1:test_key_id' is not available in targeted subnet \ '3ifty-exlam-aaaaa-aaaap-yai'." 
)] - fn do_recover_subnet_should_panic_if_ecdsa_keys_non_existing_from_requested_subnet() { + fn do_recover_subnet_should_panic_if_chain_keys_non_existing_from_requested_subnet() { let key_id = EcdsaKeyId { curve: EcdsaCurve::Secp256k1, name: "test_key_id".to_string(), @@ -740,13 +717,16 @@ mod test { // Make a request for the key from a subnet that does not have the key let mut payload = get_default_recover_subnet_payload(subnet_id_to_recover); - payload.ecdsa_config = Some(EcdsaInitialConfig { - quadruples_to_create_in_advance: 1, - keys: vec![EcdsaKeyRequest { - key_id, + + payload.chain_key_config = Some(InitialChainKeyConfig { + key_configs: vec![KeyConfigRequest { + key_config: Some(KeyConfig { + key_id: Some(MasterPublicKeyId::Ecdsa(key_id)), + pre_signatures_to_create_in_advance: Some(1), + max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), + }), subnet_id: Some(subnet_id_to_request_key_from.get()), }], - max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), signature_request_timeout_ns: None, idkg_key_rotation_period_ms: None, }); @@ -760,7 +740,7 @@ mod test { chain key 'ecdsa:Secp256k1:test_key_id' by requesting it from itself. \ Subnets cannot recover chain keys from themselves." 
)] - fn do_recover_subnet_should_panic_if_attempting_to_get_ecdsa_keys_from_itself() { + fn do_recover_subnet_should_panic_if_attempting_to_get_chain_keys_from_itself() { let key_id = EcdsaKeyId { curve: EcdsaCurve::Secp256k1, name: "test_key_id".to_string(), @@ -770,13 +750,16 @@ mod test { // We attempt to get the key from the subnet requesting it let mut payload = get_default_recover_subnet_payload(subnet_id); - payload.ecdsa_config = Some(EcdsaInitialConfig { - quadruples_to_create_in_advance: 1, - keys: vec![EcdsaKeyRequest { - key_id, + + payload.chain_key_config = Some(InitialChainKeyConfig { + key_configs: vec![KeyConfigRequest { + key_config: Some(KeyConfig { + key_id: Some(MasterPublicKeyId::Ecdsa(key_id)), + pre_signatures_to_create_in_advance: Some(1), + max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), + }), subnet_id: Some(subnet_id.get()), }], - max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), signature_request_timeout_ns: None, idkg_key_rotation_period_ms: None, }); @@ -790,7 +773,7 @@ mod test { chain keys [Ecdsa(EcdsaKeyId { curve: Secp256k1, name: \"test_key_id\" }), \ Ecdsa(EcdsaKeyId { curve: Secp256k1, name: \"test_key_id\" })] have duplicates" )] - fn do_recover_subnet_should_panic_with_duplicate_ecdsa_keys() { + fn do_recover_subnet_should_panic_with_duplicate_chain_keys() { // Step 1: Set up a registry holding an ECDSA key. let key_id = EcdsaKeyId { curve: EcdsaCurve::Secp256k1, @@ -801,55 +784,22 @@ mod test { // Step 2: try to recover a subnet with the key, but the key appears twice, which should cause a panic. 
let mut payload = get_default_recover_subnet_payload(subnet_id_to_recover); - let key_request = EcdsaKeyRequest { - key_id, + + let chain_key_request = KeyConfigRequest { + key_config: Some(KeyConfig { + key_id: Some(MasterPublicKeyId::Ecdsa(key_id)), + pre_signatures_to_create_in_advance: Some(1), + max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), + }), subnet_id: Some(subnet_id_holding_key.get()), }; - payload.ecdsa_config = Some(EcdsaInitialConfig { - quadruples_to_create_in_advance: 1, - keys: vec![key_request; 2], - max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), + + payload.chain_key_config = Some(InitialChainKeyConfig { + key_configs: vec![chain_key_request; 2], signature_request_timeout_ns: None, idkg_key_rotation_period_ms: None, }); - futures::executor::block_on(registry.do_recover_subnet(payload)); - } - - #[test] - #[should_panic( - expected = "Deprecated field ecdsa_config cannot be specified with chain_key_config." - )] - fn test_disallow_legacy_and_chain_key_ecdsa_config_specification_together() { - // Step 1: Set up a registry holding an ECDSA key. - let key_id = EcdsaKeyId { - curve: EcdsaCurve::Secp256k1, - name: "test_key_id".to_string(), - }; - let subnet_id_to_recover = subnet_test_id(1000); - let (mut registry, subnet_id_holding_key) = setup_registry_with_subnet_holding_key(&key_id); - // Step 2: try to recover a subnet with the key, but the key appears twice, which should cause a panic. 
- let mut payload = get_default_recover_subnet_payload(subnet_id_to_recover); - payload.ecdsa_config = Some(EcdsaInitialConfig { - quadruples_to_create_in_advance: 1, - keys: vec![EcdsaKeyRequest { - key_id: key_id.clone(), - subnet_id: Some(subnet_id_holding_key.get()), - }], - max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), - ..Default::default() - }); - payload.chain_key_config = Some(InitialChainKeyConfig { - key_configs: vec![KeyConfigRequest { - key_config: Some(KeyConfig { - key_id: Some(MasterPublicKeyId::Ecdsa(key_id)), - pre_signatures_to_create_in_advance: Some(1), - max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), - }), - subnet_id: Some(subnet_id_holding_key.get()), - }], - ..Default::default() - }); futures::executor::block_on(registry.do_recover_subnet(payload)); } } diff --git a/rs/registry/canister/src/mutations/do_update_subnet.rs b/rs/registry/canister/src/mutations/do_update_subnet.rs index aed08e76d00..ec23b6b8006 100644 --- a/rs/registry/canister/src/mutations/do_update_subnet.rs +++ b/rs/registry/canister/src/mutations/do_update_subnet.rs @@ -25,6 +25,12 @@ impl Registry { pub fn do_update_subnet(&mut self, payload: UpdateSubnetPayload) { println!("{}do_update_subnet: {:?}", LOG_PREFIX, payload); + assert_eq!( + payload.ecdsa_key_signing_enable, + None, + "Fields ecdsa_key_signing_{{en,dis}}able are deprecated. Please use chain_key_signing_{{en,dis}}able instead.", + ); + self.validate_update_payload_chain_key_config(&payload); self.validate_update_sev_feature(&payload); @@ -40,40 +46,14 @@ impl Registry { let mut mutations = vec![subnet_record_mutation]; - let chain_key_signing_enable = - if let Some(chain_key_signing_enable) = payload.chain_key_signing_enable { - Some(chain_key_signing_enable) - } else if let Some(ecdsa_key_signing_enable) = payload.ecdsa_key_signing_enable { - // TODO[NNS1-3022]: Remove ths branch. 
- let chain_key_signing_enable = ecdsa_key_signing_enable - .iter() - .cloned() - .map(MasterPublicKeyId::Ecdsa) - .collect(); - Some(chain_key_signing_enable) - } else { - None - }; - if let Some(chain_key_signing_enable) = chain_key_signing_enable { + if let Some(chain_key_signing_enable) = payload.chain_key_signing_enable { mutations.append( &mut self.mutations_to_enable_subnet_signing(subnet_id, &chain_key_signing_enable), ); } - let chain_key_signing_disable = - if let Some(chain_key_signing_disable) = payload.chain_key_signing_disable { - Some(chain_key_signing_disable) - } else if let Some(ecdsa_key_signing_disable) = payload.ecdsa_key_signing_disable { - // TODO[NNS1-3022]: Remove ths branch. - let chain_key_signing_disable = ecdsa_key_signing_disable - .iter() - .cloned() - .map(MasterPublicKeyId::Ecdsa) - .collect(); - Some(chain_key_signing_disable) - } else { - None - }; + let chain_key_signing_disable = payload.chain_key_signing_disable; + if let Some(chain_key_signing_disable) = chain_key_signing_disable { mutations.append( &mut self @@ -91,53 +71,25 @@ impl Registry { fn validate_update_payload_chain_key_config(&self, payload: &UpdateSubnetPayload) { let subnet_id = payload.subnet_id; - let chain_key_config_from_old_source = payload - .ecdsa_config - .clone() - .map(ChainKeyConfigInternal::from); - let chain_key_config_from_new_source = - payload.chain_key_config.clone().map(|chain_key_config| { - ChainKeyConfigInternal::try_from(chain_key_config).unwrap_or_else(|err| { - panic!( - "{}Invalid UpdateSubnetPayload.chain_key_config: {}", - LOG_PREFIX, err - ); - }) - }); + assert_eq!( + payload.ecdsa_key_signing_enable, + None, + "Fields ecdsa_key_signing_{{en,dis}}able are deprecated. 
Please use chain_key_signing_{{en,dis}}able instead.", + ); - let payload_chain_key_config = match ( - chain_key_config_from_old_source, - chain_key_config_from_new_source, - ) { - (Some(_), Some(_)) => { + assert_eq!( + payload.ecdsa_config, None, + "Field ecdsa_config is deprecated. Please use chain_key_config instead.", + ); + + let payload_chain_key_config = payload.chain_key_config.clone().map(|chain_key_config| { + ChainKeyConfigInternal::try_from(chain_key_config).unwrap_or_else(|err| { panic!( - "{}Deprecated field ecdsa_config cannot be specified with chain_key_config.", - LOG_PREFIX + "{}Invalid UpdateSubnetPayload.chain_key_config: {}", + LOG_PREFIX, err ); - } - (Some(chain_key_config), None) => { - // Old API is used; check that nothing weird is being mixed in from the new API. - assert_eq!(payload.chain_key_signing_enable, None, "{}Deprecated field ecdsa_config cannot be specified with chain_key_signing_enable.", LOG_PREFIX); - assert_eq!(payload.chain_key_signing_disable, None, "{}Deprecated field ecdsa_config cannot be specified with chain_key_signing_disable.", LOG_PREFIX); - Some(chain_key_config) - } - (None, Some(chain_key_config)) => { - // New API is used; check that nothing weird is being mixed in from the old API. 
- assert_eq!(payload.ecdsa_key_signing_enable, None, "{}Field chain_key_config cannot be specified with deprecated ecdsa_key_signing_enable.", LOG_PREFIX); - assert_eq!(payload.ecdsa_key_signing_disable, None, "{}Field chain_key_config cannot be specified with deprecated ecdsa_key_signing_disable.", LOG_PREFIX); - Some(chain_key_config) - } - (None, None) => { - let has_ecdsa_key_signing_fields = payload.ecdsa_key_signing_enable.is_some() - || payload.ecdsa_key_signing_disable.is_some(); - let has_chain_key_signing_fields = payload.chain_key_signing_enable.is_some() - || payload.chain_key_signing_disable.is_some(); - if has_ecdsa_key_signing_fields && has_chain_key_signing_fields { - panic!("Deprecated fields ecdsa_key_signing_{{en,dis}}able should not be used together with chain_key_signing_{{en,dis}}able."); - } - None - } - }; + }) + }); if let Some(payload_chain_key_config) = payload_chain_key_config { let payload_key_ids = payload_chain_key_config.key_ids(); @@ -197,22 +149,6 @@ impl Registry { } } - // TODO[NNS1-3022]: Remove this code. - if let Some(ref ecdsa_key_signing_enable) = payload.ecdsa_key_signing_enable { - let current_keys = self.get_master_public_keys_held_by_subnet(subnet_id); - for key_id in ecdsa_key_signing_enable { - let key_id = MasterPublicKeyId::Ecdsa(key_id.clone()); - if !current_keys.contains(&key_id) { - panic!( - "{}Proposal attempts to enable signing for ECDSA key '{}' on Subnet '{}', \ - but the subnet does not hold the given key. A proposal to add that key to \ - the subnet must first be separately submitted.", - LOG_PREFIX, key_id, subnet_id - ); - } - } - } - // Validate that proposal is not attempting to disable and enable signing for the same key // in the same proposal if let (Some(chain_key_signing_enable), Some(chain_key_signing_disable)) = ( @@ -230,23 +166,6 @@ impl Registry { ) } } - - // TODO[NNS1-3022]: Remove this code. 
- if let (Some(ecdsa_signing_enable), Some(ecdsa_signging_disable)) = ( - &payload.ecdsa_key_signing_enable, - &payload.ecdsa_key_signing_disable, - ) { - let enable_set = ecdsa_signing_enable.iter().collect::>(); - let disable_set = ecdsa_signging_disable.iter().collect::>(); - let intersection = enable_set.intersection(&disable_set).collect::>(); - if !intersection.is_empty() { - panic!( - "{}update_subnet aborted: Proposal attempts to enable and disable signing for \ - the same ECDSA keys: {:?}", - LOG_PREFIX, intersection, - ) - } - } } /// Validates that the SEV (AMD Secure Encrypted Virtualization) feature is not changed on @@ -583,17 +502,11 @@ fn merge_subnet_record( maybe_set_option!(subnet_record, features); - // TODO[NNS1-3022]: Stop reading from `UpdateSubnetPayload.ecdsa_config`. - { - let chain_key_config_from_old_source = ecdsa_config.map(ChainKeyConfigInternal::from); - let chain_key_config_from_new_source = chain_key_config.map(|chain_key_config| { - ChainKeyConfigInternal::try_from(chain_key_config) - .expect("Invalid UpdateSubnetPayload.chain_key_config") - }); - let chain_key_config = - chain_key_config_from_new_source.or(chain_key_config_from_old_source); - maybe_set_option!(subnet_record, chain_key_config); - } + let chain_key_config = chain_key_config.map(|chain_key_config| { + ChainKeyConfigInternal::try_from(chain_key_config) + .expect("Invalid UpdateSubnetPayload.chain_key_config") + }); + maybe_set_option!(subnet_record, chain_key_config); maybe_set!(subnet_record, max_number_of_canisters); @@ -613,7 +526,7 @@ mod tests { use ic_management_canister_types::{EcdsaCurve, EcdsaKeyId, SchnorrAlgorithm, SchnorrKeyId}; use ic_nervous_system_common_test_keys::{TEST_USER1_PRINCIPAL, TEST_USER2_PRINCIPAL}; use ic_protobuf::registry::subnet::v1::{ - ChainKeyConfig as ChainKeyConfigPb, EcdsaConfig as EcdsaConfigPb, KeyConfig as KeyConfigPb, + ChainKeyConfig as ChainKeyConfigPb, KeyConfig as KeyConfigPb, SubnetRecord as SubnetRecordPb, }; use 
ic_protobuf::types::v1::MasterPublicKeyId as MasterPublicKeyIdPb; @@ -688,20 +601,23 @@ mod tests { max_number_of_canisters: 0, ssh_readonly_access: vec![], ssh_backup_access: vec![], - ecdsa_config: None, chain_key_config: None, + ecdsa_config: None, }; - let ecdsa_config = Some(EcdsaConfig { - quadruples_to_create_in_advance: 10, - key_ids: vec![make_ecdsa_key("key_id_1")], - max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), - signature_request_timeout_ns: None, - idkg_key_rotation_period_ms: None, - }); - - let ecdsa_config_pb = ecdsa_config.clone().map(EcdsaConfigPb::from); - let chain_key_config_pb = ecdsa_config_pb.clone().map(ChainKeyConfigPb::from); + let key_id = EcdsaKeyId { + curve: EcdsaCurve::Secp256k1, + name: "key_id".to_string(), + }; + let chain_key_config = ChainKeyConfig { + key_configs: vec![KeyConfig { + key_id: Some(MasterPublicKeyId::Ecdsa(key_id)), + pre_signatures_to_create_in_advance: Some(111), + max_queue_size: Some(222), + }], + signature_request_timeout_ns: Some(333), + idkg_key_rotation_period_ms: Some(444), + }; let payload = UpdateSubnetPayload { subnet_id: SubnetId::from( @@ -729,13 +645,13 @@ mod tests { } .into(), ), - ecdsa_config, + ecdsa_config: None, ecdsa_key_signing_enable: Some(vec![make_ecdsa_key("key_id_2")]), ecdsa_key_signing_disable: None, max_number_of_canisters: Some(10), ssh_readonly_access: Some(vec!["pub_key_0".to_string()]), ssh_backup_access: Some(vec!["pub_key_1".to_string()]), - chain_key_config: None, + chain_key_config: Some(chain_key_config.clone()), chain_key_signing_enable: None, chain_key_signing_disable: None, // Deprecated/unused values follow @@ -774,7 +690,9 @@ mod tests { } .into() ), - chain_key_config: chain_key_config_pb, + chain_key_config: Some(ChainKeyConfigPb::from( + ChainKeyConfigInternal::try_from(chain_key_config).unwrap() + )), ecdsa_config: None, // obsolete (chain_key_config is used instead now) max_number_of_canisters: 10, ssh_readonly_access: vec!["pub_key_0".to_string()], @@ 
-874,7 +792,7 @@ mod tests { #[test] #[should_panic( - expected = "[Registry] Proposal attempts to enable signing for ECDSA key \ + expected = "[Registry] Proposal attempts to enable signing for chain key \ 'ecdsa:Secp256k1:existing_key_id' on Subnet 'ge6io-epiam-aaaaa-aaaap-yai', \ but the subnet does not hold the given key. A proposal to add that key to the subnet \ must first be separately submitted." @@ -908,14 +826,18 @@ mod tests { )); let mut payload = make_empty_update_payload(subnet_id); - payload.ecdsa_config = Some(EcdsaConfig { - quadruples_to_create_in_advance: 1, - key_ids: vec![key.clone()], - max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), + + payload.chain_key_config = Some(ChainKeyConfig { + key_configs: vec![KeyConfig { + key_id: Some(MasterPublicKeyId::Ecdsa(key.clone())), + pre_signatures_to_create_in_advance: Some(1), + max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), + }], signature_request_timeout_ns: None, idkg_key_rotation_period_ms: None, }); - payload.ecdsa_key_signing_enable = Some(vec![key]); + + payload.chain_key_signing_enable = Some(vec![MasterPublicKeyId::Ecdsa(key)]); // Should panic because we are trying to enable a key that hasn't previously held it registry.do_update_subnet(payload); @@ -952,14 +874,26 @@ mod tests { name: "key_id".to_string(), }; let mut payload = make_empty_update_payload(subnet_id); - payload.ecdsa_config = Some(EcdsaConfig { - quadruples_to_create_in_advance: 1, - key_ids: vec![key.clone(), key.clone()], - max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), + + payload.chain_key_config = Some(ChainKeyConfig { + key_configs: vec![ + KeyConfig { + key_id: Some(MasterPublicKeyId::Ecdsa(key.clone())), + pre_signatures_to_create_in_advance: Some(1), + max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), + }, + KeyConfig { + key_id: Some(MasterPublicKeyId::Ecdsa(key.clone())), + pre_signatures_to_create_in_advance: Some(1), + max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), + }, + ], 
signature_request_timeout_ns: None, idkg_key_rotation_period_ms: None, }); - payload.ecdsa_key_signing_enable = Some(vec![key]); + + payload.chain_key_signing_enable = Some(vec![MasterPublicKeyId::Ecdsa(key)]); + registry.do_update_subnet(payload); } @@ -968,7 +902,7 @@ mod tests { expected = "[Registry] Chain key with id 'ecdsa:Secp256k1:existing_key_id' already exists. \ IDs must be globally unique." )] - fn test_ecdsa_key_ids_must_be_globally_unique() { + fn test_chain_key_ids_must_be_globally_unique() { // We create 2 subnets. One has the key already, and the other tries to have that key id added // in an update call, which is not allowed. let existing_key_id = EcdsaKeyId { @@ -995,19 +929,20 @@ mod tests { get_invariant_compliant_subnet_record(vec![*first_node_id]); // This marks the subnet as having the key. - let ecdsa_config = EcdsaConfig { - quadruples_to_create_in_advance: 1, - key_ids: vec![existing_key_id], - max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), + let chain_key_config = Some(ChainKeyConfig { + key_configs: vec![KeyConfig { + key_id: Some(MasterPublicKeyId::Ecdsa(existing_key_id)), + pre_signatures_to_create_in_advance: Some(1), + max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), + }], signature_request_timeout_ns: None, idkg_key_rotation_period_ms: None, - }; + }); - { - let chain_key_config = ChainKeyConfigInternal::from(ecdsa_config.clone()); - let chain_key_config_pb = ChainKeyConfigPb::from(chain_key_config); - subnet_holding_key_record.chain_key_config = Some(chain_key_config_pb); - } + subnet_holding_key_record.chain_key_config = chain_key_config.map(|chain_key_config| { + let chain_key_config = ChainKeyConfigInternal::try_from(chain_key_config).unwrap(); + ChainKeyConfigPb::from(chain_key_config) + }); registry.maybe_apply_mutation_internal(add_fake_subnet( subnet_holding_key_id, @@ -1033,13 +968,16 @@ mod tests { // we try an update call with the same existing_key_id to the other subnet // which should fail. 
let mut payload = make_empty_update_payload(subnet_to_update_id); - payload.ecdsa_config = Some(EcdsaConfig { - quadruples_to_create_in_advance: 1, - key_ids: vec![EcdsaKeyId { - curve: EcdsaCurve::Secp256k1, - name: "existing_key_id".to_string(), + + payload.chain_key_config = Some(ChainKeyConfig { + key_configs: vec![KeyConfig { + key_id: Some(MasterPublicKeyId::Ecdsa(EcdsaKeyId { + curve: EcdsaCurve::Secp256k1, + name: "existing_key_id".to_string(), + })), + pre_signatures_to_create_in_advance: Some(1), + max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), }], - max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), signature_request_timeout_ns: None, idkg_key_rotation_period_ms: None, }); @@ -1162,7 +1100,7 @@ mod tests { curve: EcdsaCurve::Secp256k1, name: "existing_key_id".to_string(), }; - let master_public_key_held_by_subnet = MasterPublicKeyId::Ecdsa(key_held_by_subnet.clone()); + let master_public_key_held_by_subnet = MasterPublicKeyId::Ecdsa(key_held_by_subnet); // Create first subnet that holds the ECDSA key let (first_node_id, first_dkg_pk) = node_ids_and_dkg_pks @@ -1171,17 +1109,22 @@ mod tests { .expect("should contain at least one node ID"); let mut subnet_holding_key_record = get_invariant_compliant_subnet_record(vec![*first_node_id]); + // This marks the subnet as having the key - let ecdsa_config = EcdsaConfig { - quadruples_to_create_in_advance: 1, - key_ids: vec![key_held_by_subnet.clone()], - max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), + let chain_key_config = Some(ChainKeyConfig { + key_configs: vec![KeyConfig { + key_id: Some(master_public_key_held_by_subnet.clone()), + pre_signatures_to_create_in_advance: Some(1), + max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), + }], signature_request_timeout_ns: None, idkg_key_rotation_period_ms: None, - }; - let chain_key_config = ChainKeyConfigInternal::from(ecdsa_config); - let chain_key_config_pb = ChainKeyConfigPb::from(chain_key_config); - subnet_holding_key_record.chain_key_config = 
Some(chain_key_config_pb); + }); + + subnet_holding_key_record.chain_key_config = chain_key_config.map(|chain_key_config| { + let chain_key_config = ChainKeyConfigInternal::try_from(chain_key_config).unwrap(); + ChainKeyConfigPb::from(chain_key_config) + }); let subnet_id = subnet_test_id(1000); registry.maybe_apply_mutation_internal(add_fake_subnet( @@ -1192,14 +1135,18 @@ mod tests { )); let mut payload = make_empty_update_payload(subnet_id); - payload.ecdsa_config = Some(EcdsaConfig { - quadruples_to_create_in_advance: 1, - key_ids: vec![key_held_by_subnet.clone()], - max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), + + payload.chain_key_config = Some(ChainKeyConfig { + key_configs: vec![KeyConfig { + key_id: Some(master_public_key_held_by_subnet.clone()), + pre_signatures_to_create_in_advance: Some(1), + max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), + }], signature_request_timeout_ns: None, idkg_key_rotation_period_ms: None, }); - payload.ecdsa_key_signing_enable = Some(vec![key_held_by_subnet.clone()]); + + payload.chain_key_signing_enable = Some(vec![master_public_key_held_by_subnet.clone()]); registry.do_update_subnet(payload); @@ -1223,7 +1170,9 @@ mod tests { // The next payload to disable signing with the key. let mut payload = make_empty_update_payload(subnet_id); - payload.ecdsa_key_signing_disable = Some(vec![key_held_by_subnet.clone()]); + + payload.chain_key_signing_disable = Some(vec![master_public_key_held_by_subnet.clone()]); + registry.do_update_subnet(payload); // Ensure it's now removed from signing list. @@ -1246,10 +1195,7 @@ mod tests { #[test] #[should_panic( - expected = "[Registry] Proposal attempts to enable signing for ECDSA key \ - 'ecdsa:Secp256k1:existing_key_id' on Subnet 'ge6io-epiam-aaaaa-aaaap-yai', but the subnet \ - does not hold the given key. A proposal to add that key to the subnet must first be \ - separately submitted." 
+ expected = "Proposal attempts to enable and disable signing for the same chain keys" )] fn enable_and_disable_signing_lists_should_not_have_same_keys_in_single_request() { let mut registry = invariant_compliant_registry(0); @@ -1270,90 +1216,21 @@ mod tests { .next() .expect("should contain at least one node ID"); let mut subnet_record = get_invariant_compliant_subnet_record(vec![*first_node_id]); + // Give it the key. - subnet_record.ecdsa_config = Some( - EcdsaConfig { - quadruples_to_create_in_advance: 1, - key_ids: vec![key.clone()], + subnet_record.chain_key_config = Some(ChainKeyConfigPb { + key_configs: vec![KeyConfigPb { + key_id: Some(MasterPublicKeyIdPb::from(&MasterPublicKeyId::Ecdsa( + key.clone(), + ))), + pre_signatures_to_create_in_advance: Some(1), max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), - signature_request_timeout_ns: None, - idkg_key_rotation_period_ms: None, - } - .into(), - ); - - let subnet_id = subnet_test_id(1000); - registry.maybe_apply_mutation_internal(add_fake_subnet( - subnet_id, - &mut subnet_list_record, - subnet_record, - &btreemap!(*first_node_id => first_dkg_pk.clone()), - )); - - let mut payload = make_empty_update_payload(subnet_id); - payload.ecdsa_config = Some(EcdsaConfig { - quadruples_to_create_in_advance: 1, - key_ids: vec![key.clone()], - max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), + }], signature_request_timeout_ns: None, idkg_key_rotation_period_ms: None, }); - payload.ecdsa_key_signing_enable = Some(vec![key.clone()]); - payload.ecdsa_key_signing_disable = Some(vec![key]); - - // Should panic because we are trying to enable/disable same key - registry.do_update_subnet(payload); - } - - #[test] - #[should_panic( - expected = "Chain keys cannot be deleted. 
Attempted to delete chain keys \ - {Ecdsa(EcdsaKeyId { curve: Secp256k1, name: \"existing_key_id_2\" })} for subnet: \ - 'ge6io-epiam-aaaaa-aaaap-yai'" - )] - // TODO(NNS1-3022): Delete this once ecdsa_config is obsolete - fn test_deleting_ecdsa_keys_fails_legacy() { - let mut registry = invariant_compliant_registry(0); - - let (mutate_request, node_ids_and_dkg_pks) = prepare_registry_with_nodes(1, 2); - registry.maybe_apply_mutation_internal(mutate_request.mutations); - - let mut subnet_list_record = registry.get_subnet_list_record(); - - let key_1 = EcdsaKeyId { - curve: EcdsaCurve::Secp256k1, - name: "existing_key_id".to_string(), - }; - - let key_2 = EcdsaKeyId { - curve: EcdsaCurve::Secp256k1, - name: "existing_key_id_2".to_string(), - }; - - // Create the subnet we will update - let (first_node_id, first_dkg_pk) = node_ids_and_dkg_pks - .iter() - .next() - .expect("should contain at least one node ID"); - - let mut subnet_record = get_invariant_compliant_subnet_record(vec![*first_node_id]); - - // Give it the keys. 
- let ecdsa_config = EcdsaConfig { - quadruples_to_create_in_advance: 1, - key_ids: vec![key_1.clone(), key_2.clone()], - max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), - signature_request_timeout_ns: None, - idkg_key_rotation_period_ms: None, - }; - { - let chain_key_config = ChainKeyConfigInternal::from(ecdsa_config.clone()); - let chain_key_config_pb = ChainKeyConfigPb::from(chain_key_config); - subnet_record.chain_key_config = Some(chain_key_config_pb); - } let subnet_id = subnet_test_id(1000); - registry.maybe_apply_mutation_internal(add_fake_subnet( subnet_id, &mut subnet_list_record, @@ -1361,22 +1238,22 @@ mod tests { &btreemap!(*first_node_id => first_dkg_pk.clone()), )); - let payload = UpdateSubnetPayload { - ecdsa_config: Some(ecdsa_config.clone()), - ..make_empty_update_payload(subnet_id) - }; + let mut payload = make_empty_update_payload(subnet_id); - registry.do_update_subnet(payload.clone()); + payload.chain_key_config = Some(ChainKeyConfig { + key_configs: vec![KeyConfig { + key_id: Some(MasterPublicKeyId::Ecdsa(key.clone())), + pre_signatures_to_create_in_advance: Some(1), + max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), + }], + signature_request_timeout_ns: None, + idkg_key_rotation_period_ms: None, + }); - let payload = UpdateSubnetPayload { - ecdsa_config: Some(EcdsaConfig { - key_ids: vec![key_1.clone()], - ..ecdsa_config - }), - ..payload - }; + payload.chain_key_signing_enable = Some(vec![MasterPublicKeyId::Ecdsa(key.clone())]); + payload.chain_key_signing_disable = Some(vec![MasterPublicKeyId::Ecdsa(key.clone())]); - // Should panic because we are trying to modify the config + // Should panic because we are trying to enable/disable same key registry.do_update_subnet(payload); } @@ -1461,248 +1338,4 @@ mod tests { // Should panic because we are trying to delete an existing key registry.do_update_subnet(payload); } - - // TODO[NNS1-3022]: Replace this with a test that checks that `UpdateSubnetPayload.ecdsa_config` - // 
TODO[NNS1-3022]: cannot be set. - #[test] - #[should_panic( - expected = "Deprecated field ecdsa_config cannot be specified with chain_key_config." - )] - fn test_disallow_legacy_and_chain_key_ecdsa_config_specification_together() { - let key_id = EcdsaKeyId { - curve: EcdsaCurve::Secp256k1, - name: "fake_key_id".to_string(), - }; - let mut registry = invariant_compliant_registry(0); - - // Make a request for the key from a subnet that does not have the key. - let subnet_id = subnet_test_id(1000); - let payload = UpdateSubnetPayload { - ecdsa_config: Some(EcdsaConfig { - key_ids: vec![key_id.clone()], - quadruples_to_create_in_advance: 111, - max_queue_size: Some(222), - signature_request_timeout_ns: Some(333), - idkg_key_rotation_period_ms: Some(444), - }), - chain_key_config: Some(ChainKeyConfig { - key_configs: vec![KeyConfig { - key_id: Some(MasterPublicKeyId::Ecdsa(key_id)), - pre_signatures_to_create_in_advance: Some(111), - max_queue_size: Some(222), - }], - signature_request_timeout_ns: Some(333), - idkg_key_rotation_period_ms: Some(444), - }), - ..make_empty_update_payload(subnet_id) - }; - - registry.do_update_subnet(payload); - } - - // TODO[NNS1-3022]: Replace this with a test that checks that - // TODO[NNS1-3022]: `UpdateSubnetPayload.ecdsa_key_signing_{en,dis}able` cannot be set. - #[test] - #[should_panic( - expected = "Deprecated fields ecdsa_key_signing_{en,dis}able should not be used \ - together with chain_key_signing_{en,dis}able." 
- )] - fn test_disallow_legacy_and_chain_key_ecdsa_signing_enable_specification_together() { - let key_id = EcdsaKeyId { - curve: EcdsaCurve::Secp256k1, - name: "fake_key_id".to_string(), - }; - let mut registry = invariant_compliant_registry(0); - - let subnet_id = subnet_test_id(1000); - let payload = UpdateSubnetPayload { - ecdsa_key_signing_enable: Some(vec![key_id.clone()]), - chain_key_signing_enable: Some(vec![MasterPublicKeyId::Ecdsa(key_id)]), - ecdsa_config: None, - chain_key_config: None, - ..make_empty_update_payload(subnet_id) - }; - - registry.do_update_subnet(payload); - } - - // TODO[NNS1-3022]: Remove this test. - #[test] - #[should_panic( - expected = "Deprecated fields ecdsa_key_signing_{en,dis}able should not be used \ - together with chain_key_signing_{en,dis}able." - )] - fn test_disallow_legacy_and_chain_key_ecdsa_signing_disable_specification_together() { - let key_id = EcdsaKeyId { - curve: EcdsaCurve::Secp256k1, - name: "fake_key_id".to_string(), - }; - let mut registry = invariant_compliant_registry(0); - - let subnet_id = subnet_test_id(1000); - let payload = UpdateSubnetPayload { - ecdsa_key_signing_disable: Some(vec![key_id.clone()]), - chain_key_signing_disable: Some(vec![MasterPublicKeyId::Ecdsa(key_id)]), - ecdsa_config: None, - chain_key_config: None, - ..make_empty_update_payload(subnet_id) - }; - - registry.do_update_subnet(payload); - } - - // TODO[NNS1-3022]: Remove this test. - #[test] - #[should_panic( - expected = "Deprecated fields ecdsa_key_signing_{en,dis}able should not be used \ - together with chain_key_signing_{en,dis}able." 
- )] - fn test_disallow_legacy_enable_and_chain_key_ecdsa_signing_disable_specification_together() { - let key_id = EcdsaKeyId { - curve: EcdsaCurve::Secp256k1, - name: "fake_key_id".to_string(), - }; - let mut registry = invariant_compliant_registry(0); - - let subnet_id = subnet_test_id(1000); - let payload = UpdateSubnetPayload { - ecdsa_key_signing_enable: Some(vec![key_id.clone()]), - chain_key_signing_disable: Some(vec![MasterPublicKeyId::Ecdsa(key_id)]), - ecdsa_config: None, - chain_key_config: None, - ..make_empty_update_payload(subnet_id) - }; - - registry.do_update_subnet(payload); - } - - // TODO[NNS1-3022]: Remove this test. - #[test] - #[should_panic( - expected = "Deprecated fields ecdsa_key_signing_{en,dis}able should not be used \ - together with chain_key_signing_{en,dis}able." - )] - fn test_disallow_legacy_disable_and_chain_key_ecdsa_signing_enable_specification_together() { - let key_id = EcdsaKeyId { - curve: EcdsaCurve::Secp256k1, - name: "fake_key_id".to_string(), - }; - let mut registry = invariant_compliant_registry(0); - - let subnet_id = subnet_test_id(1000); - let payload = UpdateSubnetPayload { - ecdsa_key_signing_disable: Some(vec![key_id.clone()]), - chain_key_signing_enable: Some(vec![MasterPublicKeyId::Ecdsa(key_id)]), - ecdsa_config: None, - chain_key_config: None, - ..make_empty_update_payload(subnet_id) - }; - - registry.do_update_subnet(payload); - } - - // TODO[NNS1-3022]: Remove this test. - #[test] - #[should_panic( - expected = "Proposal attempts to enable and disable signing for the same ECDSA keys" - )] - fn test_disallow_ecdsa_key_signing_disable_and_enable_for_same_key_legacy() { - let key_id = EcdsaKeyId { - curve: EcdsaCurve::Secp256k1, - name: "fake_key_id".to_string(), - }; - let subnet_id = subnet_test_id(1000); - - let mut registry = invariant_compliant_registry(0); - - // Make sure the registry has the expected subnet record. 
- { - let (mutate_request, node_ids_and_dkg_pks) = prepare_registry_with_nodes(1, 2); - registry.maybe_apply_mutation_internal(mutate_request.mutations); - let mut subnet_list_record = registry.get_subnet_list_record(); - let (first_node_id, first_dkg_pk) = node_ids_and_dkg_pks - .iter() - .next() - .expect("should contain at least one node ID"); - let mut subnet_record = get_invariant_compliant_subnet_record(vec![*first_node_id]); - subnet_record.chain_key_config = Some(ChainKeyConfigPb { - key_configs: vec![KeyConfigPb { - key_id: Some(MasterPublicKeyIdPb::from(&MasterPublicKeyId::Ecdsa( - key_id.clone(), - ))), - pre_signatures_to_create_in_advance: Some(111), - max_queue_size: Some(222), - }], - signature_request_timeout_ns: Some(333), - idkg_key_rotation_period_ms: Some(444), - }); - registry.maybe_apply_mutation_internal(add_fake_subnet( - subnet_id, - &mut subnet_list_record, - subnet_record, - &btreemap!(*first_node_id => first_dkg_pk.clone()), - )); - } - - let payload = UpdateSubnetPayload { - ecdsa_key_signing_disable: Some(vec![key_id.clone()]), - ecdsa_key_signing_enable: Some(vec![key_id]), - ecdsa_config: None, - chain_key_config: None, - ..make_empty_update_payload(subnet_id) - }; - - registry.do_update_subnet(payload); - } - - #[test] - #[should_panic( - expected = "Proposal attempts to enable and disable signing for the same chain keys" - )] - fn test_disallow_chain_key_signing_disable_and_enable_for_same_key() { - let key_id = MasterPublicKeyId::Ecdsa(EcdsaKeyId { - curve: EcdsaCurve::Secp256k1, - name: "fake_key_id".to_string(), - }); - let subnet_id = subnet_test_id(1000); - - let mut registry = invariant_compliant_registry(0); - - // Make sure the registry has the expected subnet record. 
- { - let (mutate_request, node_ids_and_dkg_pks) = prepare_registry_with_nodes(1, 2); - registry.maybe_apply_mutation_internal(mutate_request.mutations); - let mut subnet_list_record = registry.get_subnet_list_record(); - let (first_node_id, first_dkg_pk) = node_ids_and_dkg_pks - .iter() - .next() - .expect("should contain at least one node ID"); - let mut subnet_record = get_invariant_compliant_subnet_record(vec![*first_node_id]); - subnet_record.chain_key_config = Some(ChainKeyConfigPb { - key_configs: vec![KeyConfigPb { - key_id: Some(MasterPublicKeyIdPb::from(&key_id)), - pre_signatures_to_create_in_advance: Some(111), - max_queue_size: Some(222), - }], - signature_request_timeout_ns: Some(333), - idkg_key_rotation_period_ms: Some(444), - }); - registry.maybe_apply_mutation_internal(add_fake_subnet( - subnet_id, - &mut subnet_list_record, - subnet_record, - &btreemap!(*first_node_id => first_dkg_pk.clone()), - )); - } - - let payload = UpdateSubnetPayload { - chain_key_signing_disable: Some(vec![key_id.clone()]), - chain_key_signing_enable: Some(vec![key_id]), - ecdsa_config: None, - chain_key_config: None, - ..make_empty_update_payload(subnet_id) - }; - - registry.do_update_subnet(payload); - } } diff --git a/rs/registry/canister/tests/common/test_helpers.rs b/rs/registry/canister/tests/common/test_helpers.rs index e218267817d..16e415e05b5 100644 --- a/rs/registry/canister/tests/common/test_helpers.rs +++ b/rs/registry/canister/tests/common/test_helpers.rs @@ -23,9 +23,7 @@ use ic_registry_keys::{ }; use ic_registry_proto_data_provider::ProtoRegistryDataProvider; use ic_registry_routing_table::RoutingTable; -use ic_registry_subnet_features::{ - ChainKeyConfig, EcdsaConfig, KeyConfig, DEFAULT_ECDSA_MAX_QUEUE_SIZE, -}; +use ic_registry_subnet_features::{ChainKeyConfig, KeyConfig, DEFAULT_ECDSA_MAX_QUEUE_SIZE}; use ic_registry_transport::pb::v1::RegistryAtomicMutateRequest; use ic_types::ReplicaVersion; use 
registry_canister::init::RegistryCanisterInitPayloadBuilder; @@ -46,31 +44,6 @@ pub async fn get_subnet_record(registry: &Canister<'_>, subnet_id: SubnetId) -> get_value_or_panic::(registry, make_subnet_record_key(subnet_id).as_bytes()).await } -pub fn get_subnet_holding_ecdsa_keys( - ecdsa_key_ids: &[EcdsaKeyId], - node_ids: Vec, -) -> SubnetRecord { - let mut record: SubnetRecord = CreateSubnetPayload { - unit_delay_millis: 10, - replica_version_id: ReplicaVersion::default().into(), - node_ids, - ..Default::default() - } - .into(); - - let ecdsa_config = EcdsaConfig { - quadruples_to_create_in_advance: 1, - key_ids: ecdsa_key_ids.to_vec(), - max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), - signature_request_timeout_ns: None, - idkg_key_rotation_period_ms: None, - }; - record.chain_key_config = Some(ChainKeyConfig::from(ecdsa_config.clone()).into()); - record.ecdsa_config = Some(ecdsa_config.into()); - - record -} - pub fn get_subnet_holding_chain_keys( key_ids: Vec, node_ids: Vec, @@ -267,7 +240,7 @@ pub async fn get_cup_contents( } /// Requests an ECDSA public key several times until it succeeds. -pub async fn wait_for_ecdsa_setup( +async fn wait_for_ecdsa_setup( runtime: &Runtime, calling_canister: &Canister<'_>, key_id: &EcdsaKeyId, @@ -301,7 +274,7 @@ pub async fn wait_for_ecdsa_setup( } /// Requests a Schnorr public key several times until it succeeds. 
-pub async fn wait_for_schnorr_setup( +async fn wait_for_schnorr_setup( runtime: &Runtime, calling_canister: &Canister<'_>, key_id: &SchnorrKeyId, diff --git a/rs/registry/canister/tests/create_subnet.rs b/rs/registry/canister/tests/create_subnet.rs index 02205428b40..db70b22dc4a 100644 --- a/rs/registry/canister/tests/create_subnet.rs +++ b/rs/registry/canister/tests/create_subnet.rs @@ -4,7 +4,7 @@ use canister_test::Runtime; use common::test_helpers::{ get_added_subnet, get_cup_contents, get_subnet_list_record, prepare_registry_with_nodes, set_up_universal_canister_as_governance, setup_registry_synced_with_fake_client, - wait_for_chain_key_setup, wait_for_ecdsa_setup, + wait_for_chain_key_setup, }; use dfn_candid::candid; use ic_base_types::{PrincipalId, SubnetId}; @@ -22,8 +22,8 @@ use ic_nns_test_utils::{ registry::{invariant_compliant_mutation_as_atomic_req, INITIAL_MUTATION_ID}, }; use ic_protobuf::registry::subnet::v1::{ - ChainKeyConfig as ChainKeyConfigPb, KeyConfig as KeyConfigPb, - SubnetListRecord as SubnetListRecordPb, SubnetRecord as SubnetRecordPb, + ChainKeyConfig as ChainKeyConfigPb, SubnetListRecord as SubnetListRecordPb, + SubnetRecord as SubnetRecordPb, }; use ic_protobuf::types::v1::MasterPublicKeyId as MasterPublicKeyIdPb; use ic_registry_keys::{make_subnet_list_record_key, make_subnet_record_key}; @@ -35,7 +35,6 @@ use ic_registry_transport::{pb::v1::RegistryAtomicMutateRequest, upsert}; use ic_replica_tests::{canister_test_with_config_async, get_ic_config}; use ic_types::{NodeId, ReplicaVersion}; use prost::Message; -use registry_canister::mutations::do_create_subnet::{EcdsaInitialConfig, EcdsaKeyRequest}; use registry_canister::{ init::RegistryCanisterInitPayloadBuilder, mutations::do_create_subnet::{ @@ -209,162 +208,6 @@ fn test_accepted_proposal_mutates_the_registry_some_subnets_present() { }); } -// TODO[NNS1-3022]: Remove this test. 
-#[test] -fn test_accepted_proposal_with_ecdsa_gets_keys_from_other_subnet_legacy() { - let ic_config = get_ic_config(); - - let (config, _tmpdir) = Config::temp_config(); - canister_test_with_config_async(config, ic_config, |local_runtime| async move { - let data_provider = local_runtime.registry_data_provider.clone(); - let fake_client = local_runtime.registry_client.clone(); - - let runtime = Runtime::Local(local_runtime); - - let key_1 = EcdsaKeyId { - curve: EcdsaCurve::Secp256k1, - name: "foo-bar".to_string(), - }; - - let (init_mutate, node_ids) = prepare_registry_with_nodes(5, INITIAL_MUTATION_ID); - - // Here we discover the IC's subnet ID (from our test harness) - // and then modify it to hold the key. - let subnet_list_record = SubnetListRecordPb::decode( - fake_client - .get_value( - &make_subnet_list_record_key(), - fake_client.get_latest_version(), - ) - .unwrap() - .unwrap() - .as_slice(), - ) - .unwrap(); - - let subnet_principals = subnet_list_record - .subnets - .iter() - .map(|record| PrincipalId::try_from(record).unwrap()) - .collect::>(); - let system_subnet_principal = subnet_principals.first().unwrap(); - - let system_subnet_id = SubnetId::new(*system_subnet_principal); - let mut subnet_record = SubnetRecordPb::decode( - fake_client - .get_value( - &make_subnet_record_key(system_subnet_id), - fake_client.get_latest_version(), - ) - .unwrap() - .unwrap() - .as_slice(), - ) - .unwrap(); - - subnet_record.chain_key_config = Some(ChainKeyConfigPb::from(ChainKeyConfig { - key_configs: vec![KeyConfigInternal { - key_id: MasterPublicKeyId::Ecdsa(key_1.clone()), - pre_signatures_to_create_in_advance: 100, - max_queue_size: DEFAULT_ECDSA_MAX_QUEUE_SIZE, - }], - signature_request_timeout_ns: None, - idkg_key_rotation_period_ms: None, - })); - - let modify_base_subnet_mutate = RegistryAtomicMutateRequest { - mutations: vec![upsert( - make_subnet_record_key(system_subnet_id), - subnet_record.encode_to_vec(), - )], - preconditions: vec![], - }; - - let 
registry = setup_registry_synced_with_fake_client( - &runtime, - fake_client.clone(), - data_provider, - vec![init_mutate, modify_base_subnet_mutate], - ) - .await; - - // Install the universal canister in place of the governance canister - let fake_governance_canister = set_up_universal_canister_as_governance(&runtime).await; - - wait_for_ecdsa_setup(&runtime, &fake_governance_canister, &key_1).await; - - // First, we get the initial list of subnets - let initial_subnet_list_record = get_subnet_list_record(®istry).await; - - // Create payload message with EcdsaKeyRequest - let signature_request_timeout_ns = Some(12345); - let idkg_key_rotation_period_ms = Some(12345); - let payload = { - let mut payload = make_create_subnet_payload(node_ids.clone()); - payload.ecdsa_config = Some(EcdsaInitialConfig { - quadruples_to_create_in_advance: 101, - keys: vec![EcdsaKeyRequest { - key_id: key_1.clone(), - subnet_id: Some(*system_subnet_principal), - }], - max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), - signature_request_timeout_ns, - idkg_key_rotation_period_ms, - }); - payload - }; - - // When we create subnet with ecdsa_keys enabled - try_call_via_universal_canister( - &fake_governance_canister, - ®istry, - "create_subnet", - Encode!(&payload).unwrap(), - ) - .await - .unwrap(); - - // We get a new subnet - let (subnet_id, subnet_record) = - get_added_subnet(®istry, &initial_subnet_list_record).await; - - // Registry adds those keys to the CUP - let cup_contents = get_cup_contents(®istry, subnet_id).await; - - // Check EcdsaInitializations - let dealings = &cup_contents.chain_key_initializations; - assert_eq!(dealings.len(), 1); - assert_eq!( - dealings[0_usize].key_id, - Some(MasterPublicKeyIdPb::from(&MasterPublicKeyId::Ecdsa( - EcdsaKeyId { - curve: EcdsaCurve::Secp256k1, - name: "foo-bar".to_string(), - } - ))) - ); - - // Check ChainKeyConfigPb is correctly updated - let chain_key_config = subnet_record.chain_key_config.unwrap(); - assert_eq!( - 
chain_key_config.signature_request_timeout_ns, - signature_request_timeout_ns - ); - assert_eq!( - chain_key_config.idkg_key_rotation_period_ms, - idkg_key_rotation_period_ms - ); - assert_eq!( - chain_key_config.key_configs, - vec![KeyConfigPb { - key_id: Some(MasterPublicKeyIdPb::from(&MasterPublicKeyId::Ecdsa(key_1))), - pre_signatures_to_create_in_advance: Some(101), - max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), - }], - ); - }); -} - fn test_accepted_proposal_with_chain_key_gets_keys_from_other_subnet(key_id: MasterPublicKeyId) { let ic_config = get_ic_config(); diff --git a/rs/registry/canister/tests/recover_subnet.rs b/rs/registry/canister/tests/recover_subnet.rs index 95db6836827..8c7788dd998 100644 --- a/rs/registry/canister/tests/recover_subnet.rs +++ b/rs/registry/canister/tests/recover_subnet.rs @@ -27,7 +27,7 @@ use ic_registry_keys::{ make_subnet_list_record_key, make_subnet_record_key, }; use ic_registry_subnet_features::{ - ChainKeyConfig, EcdsaConfig, KeyConfig as KeyConfigInternal, DEFAULT_ECDSA_MAX_QUEUE_SIZE, + ChainKeyConfig, KeyConfig as KeyConfigInternal, DEFAULT_ECDSA_MAX_QUEUE_SIZE, }; use ic_registry_transport::{insert, pb::v1::RegistryAtomicMutateRequest, upsert}; use ic_replica_tests::{canister_test_with_config_async, get_ic_config}; @@ -49,7 +49,7 @@ use rand::{CryptoRng, Rng, RngCore}; use registry_canister::{ init::RegistryCanisterInitPayloadBuilder, mutations::{ - do_create_subnet::{CreateSubnetPayload, EcdsaInitialConfig, EcdsaKeyRequest}, + do_create_subnet::CreateSubnetPayload, do_recover_subnet::{ InitialChainKeyConfig, KeyConfig, KeyConfigRequest, RecoverSubnetPayload, }, @@ -64,9 +64,9 @@ use std::{ mod common; use crate::common::test_helpers::prepare_registry_with_nodes_and_valid_pks; use common::test_helpers::{ - get_cup_contents, get_subnet_holding_chain_keys, get_subnet_holding_ecdsa_keys, - get_subnet_record, set_up_universal_canister_as_governance, - setup_registry_synced_with_fake_client, wait_for_chain_key_setup, 
wait_for_ecdsa_setup, + get_cup_contents, get_subnet_holding_chain_keys, get_subnet_record, + set_up_universal_canister_as_governance, setup_registry_synced_with_fake_client, + wait_for_chain_key_setup, }; use ic_nns_test_utils::registry::create_subnet_threshold_signing_pubkey_and_cup_mutations; @@ -217,228 +217,6 @@ fn test_recover_subnet_with_replacement_nodes() { }); } -// TODO[NNS1-3022]: Remove this test. -#[test] -fn test_recover_subnet_gets_ecdsa_keys_when_needed_legacy() { - let ic_config = get_ic_config(); - let (config, _tmpdir) = Config::temp_config(); - canister_test_with_config_async(config, ic_config, |local_runtime| async move { - let data_provider = local_runtime.registry_data_provider.clone(); - let fake_client = local_runtime.registry_client.clone(); - - let runtime = Runtime::Local(local_runtime); - // get some nodes for our tests - let (init_mutate, node_ids_and_valid_pks) = prepare_registry_with_nodes_and_valid_pks(5, 0); - let mut node_ids: Vec = node_ids_and_valid_pks.keys().cloned().collect(); - - let key_1 = EcdsaKeyId { - curve: EcdsaCurve::Secp256k1, - name: "foo-bar".to_string(), - }; - - let subnet_to_recover_nodes = vec![node_ids.pop().unwrap()]; - let subnet_to_recover: SubnetRecord = CreateSubnetPayload { - node_ids: subnet_to_recover_nodes.clone(), - unit_delay_millis: 10, - gossip_retransmission_request_ms: 10_000, - gossip_registry_poll_period_ms: 2000, - gossip_pfn_evaluation_period_ms: 50, - gossip_receive_check_cache_size: 1, - gossip_max_duplicity: 1, - gossip_max_chunk_wait_ms: 200, - gossip_max_artifact_streams_per_peer: 1, - replica_version_id: ReplicaVersion::default().into(), - ..CreateSubnetPayload::default() - } - .into(); - - // Here we discover the IC's subnet ID (from our test harness) - // and then modify it to hold the key and sign for it. 
- let mut subnet_list_record = SubnetListRecord::decode( - fake_client - .get_value( - &make_subnet_list_record_key(), - fake_client.get_latest_version(), - ) - .unwrap() - .unwrap() - .as_slice(), - ) - .unwrap(); - - let subnet_to_recover_subnet_id = subnet_test_id(1003); - - subnet_list_record - .subnets - .push(subnet_to_recover_subnet_id.get().into_vec()); - - let subnet_principals = subnet_list_record - .subnets - .iter() - .map(|record| PrincipalId::try_from(record).unwrap()) - .collect::>(); - let system_subnet_principal = subnet_principals.first().unwrap(); - - let system_subnet_id = SubnetId::new(*system_subnet_principal); - let mut subnet_record = SubnetRecord::decode( - fake_client - .get_value( - &make_subnet_record_key(system_subnet_id), - fake_client.get_latest_version(), - ) - .unwrap() - .unwrap() - .as_slice(), - ) - .unwrap(); - subnet_record.chain_key_config = Some(ChainKeyConfigPb::from(ChainKeyConfig { - key_configs: vec![KeyConfigInternal { - key_id: MasterPublicKeyId::Ecdsa(key_1.clone()), - pre_signatures_to_create_in_advance: 100, - max_queue_size: DEFAULT_ECDSA_MAX_QUEUE_SIZE, - }], - signature_request_timeout_ns: None, - idkg_key_rotation_period_ms: None, - })); - - let modify_base_subnet_mutate = RegistryAtomicMutateRequest { - mutations: vec![upsert( - make_subnet_record_key(system_subnet_id), - subnet_record.encode_to_vec(), - )], - preconditions: vec![], - }; - - let mut subnet_threshold_signing_pk_and_cup_mutations = - create_subnet_threshold_signing_pubkey_and_cup_mutations( - subnet_to_recover_subnet_id, - &node_ids_and_valid_pks - .iter() - .map(|(node_id, valid_pks)| { - (*node_id, valid_pks.dkg_dealing_encryption_key().clone()) - }) - .collect(), - ); - - // Add the subnet we are recovering holding requested keys - // Note, because these mutations are also synced with underlying IC registry, they - // need a CUP - let mut mutations = vec![ - upsert( - make_subnet_record_key(subnet_to_recover_subnet_id).into_bytes(), - 
subnet_to_recover.encode_to_vec(), - ), - upsert( - make_subnet_list_record_key().into_bytes(), - subnet_list_record.encode_to_vec(), - ), - ]; - mutations.append(&mut subnet_threshold_signing_pk_and_cup_mutations); - - let add_subnets_mutate = RegistryAtomicMutateRequest { - preconditions: vec![], - mutations, - }; - - let registry = setup_registry_synced_with_fake_client( - &runtime, - fake_client, - data_provider, - vec![ - init_mutate, - add_subnets_mutate, - modify_base_subnet_mutate, - // ecdsa_signing_subnets_mutate, - ], - ) - .await; - - // Then we need to ensure the CUP for our subnet under test - // does not contain the ecdsa_initializations, since we will be asserting those were added - let before_recover_cup_contents = - get_cup_contents(®istry, subnet_to_recover_subnet_id).await; - assert_eq!(before_recover_cup_contents.ecdsa_initializations, vec![]); - assert_eq!( - before_recover_cup_contents.chain_key_initializations, - vec![] - ); - - // Install the universal canister in place of the governance canister - let fake_governance_canister = set_up_universal_canister_as_governance(&runtime).await; - println!("waiting for ecdsa setup"); - - wait_for_ecdsa_setup(&runtime, &fake_governance_canister, &key_1).await; - - let signature_request_timeout_ns = Some(12345); - let idkg_key_rotation_period_ms = Some(12345); - let payload = RecoverSubnetPayload { - subnet_id: subnet_to_recover_subnet_id.get(), - height: 10, - time_ns: 1200, - state_hash: vec![10, 20, 30], - replacement_nodes: None, - registry_store_uri: None, - ecdsa_config: Some(EcdsaInitialConfig { - quadruples_to_create_in_advance: 1, - keys: vec![EcdsaKeyRequest { - key_id: key_1.clone(), - subnet_id: Some(system_subnet_id.get()), - }], - max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), - signature_request_timeout_ns, - idkg_key_rotation_period_ms, - }), - chain_key_config: None, // We test that the legacy proposals still work. 
- }; - - // When we recover a subnet with specified ecdsa_keys - try_call_via_universal_canister( - &fake_governance_canister, - ®istry, - "recover_subnet", - Encode!(&payload).unwrap(), - ) - .await - .unwrap(); - - let cup_contents = get_cup_contents(®istry, subnet_to_recover_subnet_id).await; - - // Check chain key initializations. - let dealings = &cup_contents.chain_key_initializations; - assert_eq!(dealings.len(), 1); - assert_eq!( - dealings[0_usize].key_id, - Some(MasterPublicKeyIdPb::from(&MasterPublicKeyId::Ecdsa( - EcdsaKeyId { - curve: EcdsaCurve::Secp256k1, - name: "foo-bar".to_string(), - } - ))) - ); - - // Check ChainKeyConfig is correctly updated - let subnet_record = get_subnet_record(®istry, subnet_to_recover_subnet_id).await; - let chain_key_config = subnet_record.chain_key_config.unwrap(); - assert_eq!( - chain_key_config.signature_request_timeout_ns, - signature_request_timeout_ns - ); - assert_eq!( - chain_key_config.idkg_key_rotation_period_ms, - idkg_key_rotation_period_ms - ); - - assert_eq!( - chain_key_config.key_configs, - vec![KeyConfigPb { - key_id: Some(MasterPublicKeyIdPb::from(&MasterPublicKeyId::Ecdsa(key_1))), - pre_signatures_to_create_in_advance: Some(1), - max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), - }] - ); - }); -} - fn test_recover_subnet_gets_chain_keys_when_needed(key_id: MasterPublicKeyId) { let ic_config = get_ic_config(); let (config, _tmpdir) = Config::temp_config(); @@ -670,216 +448,6 @@ fn test_recover_subnet_gets_schnorr_keys_when_needed() { test_recover_subnet_gets_chain_keys_when_needed(key_id); } -// TODO[NNS1-3022]: Delete this test once `RecoverSubnetPayload.ecdsa_config` is obsolete. 
-#[test] -fn test_recover_subnet_without_ecdsa_key_removes_it_from_signing_list_legacy() { - let ic_config = get_ic_config(); - let (config, _tmpdir) = Config::temp_config(); - canister_test_with_config_async(config, ic_config, |local_runtime| async move { - let data_provider = local_runtime.registry_data_provider.clone(); - let fake_client = local_runtime.registry_client.clone(); - - let runtime = Runtime::Local(local_runtime); - // get some nodes for our tests - let (init_mutate, node_ids_and_valid_pks) = prepare_registry_with_nodes_and_valid_pks(5, 0); - let mut node_ids: Vec = node_ids_and_valid_pks.keys().cloned().collect(); - - let key_1 = EcdsaKeyId { - curve: EcdsaCurve::Secp256k1, - name: "foo-bar".to_string(), - }; - - let subnet_to_recover_nodes = vec![node_ids.pop().unwrap()]; - let subnet_to_recover = - get_subnet_holding_ecdsa_keys(&[key_1.clone()], subnet_to_recover_nodes.clone()); - - // Here we discover the IC's subnet ID (from our test harness) - // and then modify it to hold the key and sign for it. 
- let mut subnet_list_record = SubnetListRecord::decode( - fake_client - .get_value( - &make_subnet_list_record_key(), - fake_client.get_latest_version(), - ) - .unwrap() - .unwrap() - .as_slice(), - ) - .unwrap(); - - let subnet_to_recover_subnet_id = subnet_test_id(1003); - - subnet_list_record - .subnets - .push(subnet_to_recover_subnet_id.get().into_vec()); - - let subnet_principals = subnet_list_record - .subnets - .iter() - .map(|record| PrincipalId::try_from(record).unwrap()) - .collect::>(); - let system_subnet_principal = subnet_principals.first().unwrap(); - - let system_subnet_id = SubnetId::new(*system_subnet_principal); - let mut subnet_record = SubnetRecord::decode( - fake_client - .get_value( - &make_subnet_record_key(system_subnet_id), - fake_client.get_latest_version(), - ) - .unwrap() - .unwrap() - .as_slice(), - ) - .unwrap(); - - let ecdsa_config = EcdsaConfig { - quadruples_to_create_in_advance: 1, - key_ids: vec![(key_1.clone())], - max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), - signature_request_timeout_ns: None, - idkg_key_rotation_period_ms: None, - }; - let chain_key_config = ChainKeyConfig::from(ecdsa_config); - let chain_key_config_pb = ChainKeyConfigPb::from(chain_key_config); - subnet_record.chain_key_config = Some(chain_key_config_pb); - - let modify_base_subnet_mutate = RegistryAtomicMutateRequest { - mutations: vec![upsert( - make_subnet_record_key(system_subnet_id), - subnet_record.encode_to_vec(), - )], - preconditions: vec![], - }; - - let mut subnet_threshold_signing_pk_and_cup_mutations = - create_subnet_threshold_signing_pubkey_and_cup_mutations( - subnet_to_recover_subnet_id, - &node_ids_and_valid_pks - .iter() - .map(|(node_id, valid_pks)| { - (*node_id, valid_pks.dkg_dealing_encryption_key().clone()) - }) - .collect(), - ); - - // Add the subnet we are recovering holding requested keys - // Note, because these mutations are also synced with underlying IC registry, they - // need a CUP - let mut mutations = vec![ - 
upsert( - make_subnet_record_key(subnet_to_recover_subnet_id).into_bytes(), - subnet_to_recover.encode_to_vec(), - ), - upsert( - make_subnet_list_record_key().into_bytes(), - subnet_list_record.encode_to_vec(), - ), - ]; - mutations.append(&mut subnet_threshold_signing_pk_and_cup_mutations); - - let add_subnets_mutate = RegistryAtomicMutateRequest { - preconditions: vec![], - mutations, - }; - - // Enable signing with the recovering subnet - we will later check that - // this subnet is removed from the signing subnet list. - let ecdsa_signing_subnets_mutate = RegistryAtomicMutateRequest { - preconditions: vec![], - mutations: vec![insert( - make_chain_key_signing_subnet_list_key(&MasterPublicKeyId::Ecdsa(key_1.clone())), - ChainKeySigningSubnetList { - subnets: vec![subnet_id_into_protobuf(subnet_to_recover_subnet_id)], - } - .encode_to_vec(), - )], - }; - - let registry = setup_registry_synced_with_fake_client( - &runtime, - fake_client, - data_provider, - vec![ - init_mutate, - add_subnets_mutate, - modify_base_subnet_mutate, - ecdsa_signing_subnets_mutate, - ], - ) - .await; - - // Then we need to ensure the CUP for our subnet under test - // does not contain the ecdsa_initializations, since we will be asserting those were added - let before_recover_cup_contents = - get_cup_contents(®istry, subnet_to_recover_subnet_id).await; - assert_eq!(before_recover_cup_contents.ecdsa_initializations, vec![]); - assert_eq!( - before_recover_cup_contents.chain_key_initializations, - vec![] - ); - - // Install the universal canister in place of the governance canister - let fake_governance_canister = set_up_universal_canister_as_governance(&runtime).await; - - let signature_request_timeout_ns = Some(12345); - let idkg_key_rotation_period_ms = Some(12345); - let payload = RecoverSubnetPayload { - subnet_id: subnet_to_recover_subnet_id.get(), - height: 10, - time_ns: 1200, - state_hash: vec![10, 20, 30], - replacement_nodes: None, - registry_store_uri: None, - ecdsa_config: 
Some(EcdsaInitialConfig { - quadruples_to_create_in_advance: 1, - keys: vec![], - max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), - signature_request_timeout_ns, - idkg_key_rotation_period_ms, - }), - chain_key_config: None, - }; - - // When we recover a subnet with specified ecdsa_keys - try_call_via_universal_canister( - &fake_governance_canister, - ®istry, - "recover_subnet", - Encode!(&payload).unwrap(), - ) - .await - .unwrap(); - - let cup_contents = get_cup_contents(®istry, subnet_to_recover_subnet_id).await; - - // Check EcdsaInitializations - let dealings = &cup_contents.ecdsa_initializations; - assert_eq!(dealings.len(), 0); - - // Check ChainKeyConfig is correctly updated - let subnet_record = get_subnet_record(®istry, subnet_to_recover_subnet_id).await; - let chain_key_config = subnet_record.chain_key_config.unwrap(); - - assert_eq!( - chain_key_config.signature_request_timeout_ns, - signature_request_timeout_ns - ); - assert_eq!( - chain_key_config.idkg_key_rotation_period_ms, - idkg_key_rotation_period_ms - ); - - assert_eq!(chain_key_config.key_configs, vec![]); - - // Check ecdsa_signing_subnets_list for key_1 is empty now. 
- assert_eq!( - chain_key_signing_subnet_list(®istry, &MasterPublicKeyId::Ecdsa(key_1)).await, - ChainKeySigningSubnetList { subnets: vec![] } - ) - }); -} - fn test_recover_subnet_without_chain_key_removes_it_from_signing_list(key_id: MasterPublicKeyId) { let ic_config = get_ic_config(); let (config, _tmpdir) = Config::temp_config(); diff --git a/rs/registry/canister/tests/update_subnet.rs b/rs/registry/canister/tests/update_subnet.rs index 3a171d14cda..dea72bdfdf2 100644 --- a/rs/registry/canister/tests/update_subnet.rs +++ b/rs/registry/canister/tests/update_subnet.rs @@ -14,12 +14,10 @@ use ic_nns_test_utils::{ registry::{get_value_or_panic, invariant_compliant_mutation_as_atomic_req}, }; use ic_protobuf::registry::crypto::v1::ChainKeySigningSubnetList; -use ic_protobuf::registry::subnet::v1::{ - ChainKeyConfig as ChainKeyConfigPb, EcdsaConfig as EcdsaConfigPb, SubnetRecord, -}; +use ic_protobuf::registry::subnet::v1::{ChainKeyConfig as ChainKeyConfigPb, SubnetRecord}; use ic_registry_keys::{make_chain_key_signing_subnet_list_key, make_subnet_record_key}; use ic_registry_subnet_features::{ - ChainKeyConfig as ChainKeyConfigInternal, EcdsaConfig, DEFAULT_ECDSA_MAX_QUEUE_SIZE, + ChainKeyConfig as ChainKeyConfigInternal, DEFAULT_ECDSA_MAX_QUEUE_SIZE, }; use ic_registry_subnet_type::SubnetType; use ic_registry_transport::{insert, pb::v1::RegistryAtomicMutateRequest}; @@ -34,14 +32,6 @@ use std::str::FromStr; mod common; use common::test_helpers::get_subnet_record; -// TODO[NNS1-2986]: Remove, replacing with `make_ecdsa_master_public_key`. -fn make_ecdsa_key(name: &str) -> EcdsaKeyId { - EcdsaKeyId { - curve: EcdsaCurve::Secp256k1, - name: name.to_string(), - } -} - #[test] fn test_the_anonymous_user_cannot_update_a_subnets_configuration() { local_test_on_nns_subnet(|runtime| async move { @@ -390,233 +380,6 @@ fn test_the_governance_canister_can_update_a_subnets_configuration() { }); } -// TODO[NNS1-3102]: Remove this test. 
-#[test] -fn test_subnets_configuration_ecdsa_fields_are_updated_correctly_legacy() { - const ENABLE_BEFORE_ADDING_REJECT_MSG: &str = "Canister rejected with \ - message: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister \ - called `ic0.trap` with message: Panicked at '[Registry] Proposal attempts to enable \ - signing for ECDSA key 'ecdsa:Secp256k1:key_id_1' on Subnet \ - 'bn3el-jdvcs-a3syn-gyqwo-umlu3-avgud-vq6yl-hunln-3jejb-226vq-mae', but the \ - subnet does not hold the given key. A proposal to add that key to the subnet \ - must first be separately submitted.'"; - - const NO_CHAIN_KEY_CONFIG_REJECT_MSG: &str = "Canister rejected with \ - message: IC0503: Error from Canister rwlgt-iiaaa-aaaaa-aaaaa-cai: Canister \ - called `ic0.trap` with message: Panicked at '[Registry] Proposal attempts to enable \ - signing for ECDSA key 'ecdsa:Secp256k1:key_id_1' \ - on Subnet 'bn3el-jdvcs-a3syn-gyqwo-umlu3-avgud-vq6yl-hunln-3jejb-226vq-mae', \ - but the subnet does not hold the given key. 
A proposal to add that key to the subnet \ - must first be separately submitted.'"; - - local_test_on_nns_subnet(|runtime| async move { - let subnet_id = SubnetId::from( - PrincipalId::from_str( - "bn3el-jdvcs-a3syn-gyqwo-umlu3-avgud-vq6yl-hunln-3jejb-226vq-mae", - ) - .unwrap(), - ); - - let subnet_record = SubnetRecord { - membership: vec![], - max_ingress_bytes_per_message: 60 * 1024 * 1024, - max_ingress_messages_per_block: 1000, - max_block_payload_size: 4 * 1024 * 1024, - unit_delay_millis: 500, - initial_notary_delay_millis: 1500, - replica_version_id: ReplicaVersion::default().into(), - dkg_interval_length: 0, - dkg_dealings_per_block: 1, - start_as_nns: false, - subnet_type: SubnetType::Application.into(), - is_halted: false, - halt_at_cup_height: false, - features: None, - max_number_of_canisters: 0, - ssh_readonly_access: vec![], - ssh_backup_access: vec![], - ecdsa_config: None, - chain_key_config: None, - }; - - // Just create the registry canister and wait until the subnet_handler ID is - // known to install and initialize it so that it can be authorized to make - // mutations to the registry. 
- let registry = set_up_registry_canister( - &runtime, - RegistryCanisterInitPayloadBuilder::new() - .push_init_mutate_request(invariant_compliant_mutation_as_atomic_req(0)) - .push_init_mutate_request(RegistryAtomicMutateRequest { - mutations: vec![insert( - make_subnet_record_key(subnet_id).as_bytes(), - subnet_record.encode_to_vec(), - )], - preconditions: vec![], - }) - .build(), - ) - .await; - - // Install the universal canister in place of the governance canister - let fake_governance_canister = set_up_universal_canister(&runtime).await; - // Since it takes the id reserved for the governance canister, it can impersonate - // it - assert_eq!( - fake_governance_canister.canister_id(), - ic_nns_constants::GOVERNANCE_CANISTER_ID - ); - - let signature_request_timeout_ns = Some(12345); - let idkg_key_rotation_period_ms = Some(12345); - - let ecdsa_config = Some(EcdsaConfig { - quadruples_to_create_in_advance: 10, - key_ids: vec![make_ecdsa_key("key_id_1")], - max_queue_size: Some(DEFAULT_ECDSA_MAX_QUEUE_SIZE), - signature_request_timeout_ns, - idkg_key_rotation_period_ms, - }); - - // update payload message - let mut payload = UpdateSubnetPayload { - ecdsa_config: ecdsa_config.clone(), - ecdsa_key_signing_enable: Some(vec![make_ecdsa_key("key_id_1")]), - ..empty_update_subnet_payload(subnet_id) - }; - - let response = try_call_via_universal_canister( - &fake_governance_canister, - ®istry, - "update_subnet", - Encode!(&payload).unwrap(), - ) - .await; - let error_text = assert_matches!(response, Err(error_text) => error_text); - assert!( - error_text.starts_with(ENABLE_BEFORE_ADDING_REJECT_MSG), - "Unexpected error: `{}` (does not start with `{}`).", - error_text, - ENABLE_BEFORE_ADDING_REJECT_MSG, - ); - - let new_subnet_record = get_value_or_panic::( - ®istry, - make_subnet_record_key(subnet_id).as_bytes(), - ) - .await; - - // There should be no change - assert_eq!(new_subnet_record, subnet_record); - - // Change one field at a time in this payload - payload = 
UpdateSubnetPayload { - ecdsa_config: None, - ecdsa_key_signing_enable: Some(vec![make_ecdsa_key("key_id_1")]), - ..empty_update_subnet_payload(subnet_id) - }; - - let response = try_call_via_universal_canister( - &fake_governance_canister, - ®istry, - "update_subnet", - Encode!(&payload).unwrap(), - ) - .await; - - let err_text = assert_matches!(response, Err(err_text) => err_text); - - assert!( - err_text.contains(NO_CHAIN_KEY_CONFIG_REJECT_MSG), - "Error `{}` does not contain expected substring\n{}", - err_text, - NO_CHAIN_KEY_CONFIG_REJECT_MSG, - ); - - let new_subnet_record = get_value_or_panic::( - ®istry, - make_subnet_record_key(subnet_id).as_bytes(), - ) - .await; - - // There should be no change - assert_eq!(new_subnet_record, subnet_record); - - // Trying again, this time in the correct order - payload = UpdateSubnetPayload { - ecdsa_config: ecdsa_config.clone(), - ecdsa_key_signing_enable: None, - ..empty_update_subnet_payload(subnet_id) - }; - - assert!(try_call_via_universal_canister( - &fake_governance_canister, - ®istry, - "update_subnet", - Encode!(&payload).unwrap() - ) - .await - .is_ok()); - - let new_subnet_record = get_value_or_panic::( - ®istry, - make_subnet_record_key(subnet_id).as_bytes(), - ) - .await; - - // Should see the new value for the config reflected - let ecdsa_config_pb = ecdsa_config.clone().map(EcdsaConfigPb::from); - let chain_key_config_pb = ecdsa_config_pb.clone().map(ChainKeyConfigPb::from); - assert_eq!( - new_subnet_record, - SubnetRecord { - ecdsa_config: None, // obsolete (chain_key_config is used instead now) - chain_key_config: chain_key_config_pb, - ..subnet_record - } - ); - - // This update should enable signing on our subnet for the given key. 
- payload = UpdateSubnetPayload { - ecdsa_key_signing_enable: Some(vec![make_ecdsa_key("key_id_1")]), - ..empty_update_subnet_payload(subnet_id) - }; - - let response = try_call_via_universal_canister( - &fake_governance_canister, - ®istry, - "update_subnet", - Encode!(&payload).unwrap(), - ) - .await; - assert_matches!(response, Ok(_)); - - let subnet_record = get_subnet_record(®istry, subnet_id).await; - { - let chain_key_config = subnet_record.chain_key_config.unwrap(); - let max_queue_size = chain_key_config.key_configs[0].max_queue_size.unwrap(); - assert_eq!(max_queue_size, DEFAULT_ECDSA_MAX_QUEUE_SIZE); - } - - let new_signing_subnet_list: Vec<_> = get_value_or_panic::( - ®istry, - make_chain_key_signing_subnet_list_key(&MasterPublicKeyId::Ecdsa(make_ecdsa_key( - "key_id_1", - ))) - .as_bytes(), - ) - .await - .subnets - .into_iter() - .map(|subnet_bytes| subnet_id_try_from_protobuf(subnet_bytes).unwrap()) - .collect(); - - // The subnet is now responsible for signing with the key. - assert_eq!(new_signing_subnet_list, vec![subnet_id]); - - Ok(()) - }); -} - #[test] fn test_subnets_configuration_ecdsa_fields_are_updated_correctly() { let key_id = MasterPublicKeyId::Ecdsa(EcdsaKeyId { diff --git a/rs/registry/canister/unreleased_changelog.md b/rs/registry/canister/unreleased_changelog.md index 52f7e85fd0f..9d4e5e4a466 100644 --- a/rs/registry/canister/unreleased_changelog.md +++ b/rs/registry/canister/unreleased_changelog.md @@ -24,6 +24,13 @@ meet the requirements for an API boundary node (i.e., is configured with a domai ## Deprecated +The legacy ECDSA-specific fields are no longer supported in Registry canister's subnet operations +(creation, updating, recovery). 
Please use the more expressive chain key configuration keys: + +* `ecdsa_config` → `chain_key_config` +* `ecdsa_key_signing_enable` → `chain_key_signing_enable` +* `ecdsa_key_signing_disable` → `chain_key_signing_disable` + ## Removed ## Fixed diff --git a/rs/registry/subnet_features/src/lib.rs b/rs/registry/subnet_features/src/lib.rs index a1671279f2a..8d8fb33e243 100644 --- a/rs/registry/subnet_features/src/lib.rs +++ b/rs/registry/subnet_features/src/lib.rs @@ -222,39 +222,6 @@ impl TryFrom for ChainKeyConfig { } } -/// This code is part of the data migration from `EcdsaConfig` to `ChainKeyConfig`. -/// -/// Use this implementation to retrofit the values from an existing `EcdsaConfig` instance in places -/// where we now need a `ChainKeyConfig` instance. -/// -/// TODO[NNS1-2986]: Remove this code. -impl From for ChainKeyConfig { - fn from(src: EcdsaConfig) -> Self { - let EcdsaConfig { - key_ids, - quadruples_to_create_in_advance, - max_queue_size, - signature_request_timeout_ns, - idkg_key_rotation_period_ms, - } = src; - - let key_configs = key_ids - .into_iter() - .map(|key_id| KeyConfig { - key_id: MasterPublicKeyId::Ecdsa(key_id), - pre_signatures_to_create_in_advance: quadruples_to_create_in_advance, - max_queue_size: max_queue_size.unwrap_or(DEFAULT_ECDSA_MAX_QUEUE_SIZE), - }) - .collect(); - - Self { - key_configs, - signature_request_timeout_ns, - idkg_key_rotation_period_ms, - } - } -} - #[cfg(test)] mod tests { use ic_management_canister_types::EcdsaCurve; @@ -280,72 +247,6 @@ mod tests { ); } - #[test] - fn test_chain_key_config_from_ecdsa_config() { - // Run code under test. - let chain_key_config = ChainKeyConfig::from(EcdsaConfig { - quadruples_to_create_in_advance: 77, - key_ids: vec![EcdsaKeyId { - curve: EcdsaCurve::Secp256k1, - name: "test_curve".to_string(), - }], - max_queue_size: Some(30), - signature_request_timeout_ns: Some(123_456), - idkg_key_rotation_period_ms: Some(321_654), - }); - // Assert expected result value. 
- assert_eq!( - chain_key_config, - ChainKeyConfig { - key_configs: vec![KeyConfig { - key_id: MasterPublicKeyId::Ecdsa(EcdsaKeyId { - curve: EcdsaCurve::Secp256k1, - name: "test_curve".to_string(), - }), - pre_signatures_to_create_in_advance: 77, - max_queue_size: 30, - }], - signature_request_timeout_ns: Some(123_456), - idkg_key_rotation_period_ms: Some(321_654), - } - ); - } - - #[test] - fn test_chain_key_config_pb_from_ecdsa_config_pb() { - // Run code under test. - let chain_key_config_pb = pb::ChainKeyConfig::from(pb::EcdsaConfig { - quadruples_to_create_in_advance: 77, - key_ids: vec![pb_types::EcdsaKeyId { - curve: 1, - name: "test_curve".to_string(), - }], - max_queue_size: 30, - signature_request_timeout_ns: Some(123_456), - idkg_key_rotation_period_ms: Some(321_654), - }); - // Assert expected result value. - assert_eq!( - chain_key_config_pb, - pb::ChainKeyConfig { - key_configs: vec![pb::KeyConfig { - key_id: Some(pb_types::MasterPublicKeyId { - key_id: Some(pb_types::master_public_key_id::KeyId::Ecdsa( - pb_types::EcdsaKeyId { - curve: 1, - name: "test_curve".to_string(), - } - )), - }), - pre_signatures_to_create_in_advance: Some(77), - max_queue_size: Some(30), - }], - signature_request_timeout_ns: Some(123_456), - idkg_key_rotation_period_ms: Some(321_654), - } - ); - } - #[test] fn test_chain_key_config_round_trip() { // Run code under test. 
@@ -389,26 +290,4 @@ mod tests { assert_eq!(chain_key_config, chain_key_config_after_deser,); } - - #[test] - fn test_chain_key_config_pb_from_ecdsa_config() { - let ecdsa_config = EcdsaConfig { - quadruples_to_create_in_advance: 77, - key_ids: vec![EcdsaKeyId { - curve: EcdsaCurve::Secp256k1, - name: "test_curve".to_string(), - }], - max_queue_size: Some(30), - signature_request_timeout_ns: Some(123_456), - idkg_key_rotation_period_ms: Some(321_654), - }; - - let chain_key_config = ChainKeyConfig::from(ecdsa_config.clone()); - let chain_key_config_pb_a = pb::ChainKeyConfig::from(chain_key_config); - - let ecdsa_config_pb = pb::EcdsaConfig::from(ecdsa_config); - let chain_key_config_pb_b = pb::ChainKeyConfig::from(ecdsa_config_pb); - - assert_eq!(chain_key_config_pb_a, chain_key_config_pb_b); - } } diff --git a/rs/rosetta-api/icp/src/convert.rs b/rs/rosetta-api/icp/src/convert.rs index f24984635b5..f0ab787b948 100644 --- a/rs/rosetta-api/icp/src/convert.rs +++ b/rs/rosetta-api/icp/src/convert.rs @@ -1,37 +1,45 @@ mod state; -use crate::convert::state::State; -use crate::errors::ApiError; -use crate::models::amount::{from_amount, ledgeramount_from_amount}; -use crate::models::operation::OperationType; -use crate::models::{self, AccountIdentifier, BlockIdentifier, Operation}; -use crate::request::request_result::RequestResult; -use crate::request::transaction_operation_results::TransactionOperationResults; -use crate::request::transaction_results::TransactionResults; -use crate::request::Request; -use crate::request_types::{ - ChangeAutoStakeMaturityMetadata, DisburseMetadata, FollowMetadata, KeyMetadata, - MergeMaturityMetadata, NeuronIdentifierMetadata, NeuronInfoMetadata, PublicKeyOrPrincipal, - RegisterVoteMetadata, RequestResultMetadata, SetDissolveTimestampMetadata, SpawnMetadata, - StakeMaturityMetadata, Status, STATUS_COMPLETED, +use crate::{ + convert, + convert::state::State, + errors, + errors::ApiError, + models::{ + self, + amount::{from_amount, 
ledgeramount_from_amount}, + operation::OperationType, + AccountIdentifier, BlockIdentifier, Operation, + }, + request::{ + request_result::RequestResult, transaction_operation_results::TransactionOperationResults, + transaction_results::TransactionResults, Request, + }, + request_types::{ + ChangeAutoStakeMaturityMetadata, DisburseMetadata, FollowMetadata, KeyMetadata, + ListNeuronsMetadata, MergeMaturityMetadata, NeuronIdentifierMetadata, NeuronInfoMetadata, + PublicKeyOrPrincipal, RegisterVoteMetadata, RequestResultMetadata, + SetDissolveTimestampMetadata, SpawnMetadata, StakeMaturityMetadata, Status, + STATUS_COMPLETED, + }, + transaction_id::TransactionIdentifier, }; -use crate::transaction_id::TransactionIdentifier; -use crate::{convert, errors}; use dfn_protobuf::ProtoBuf; use ic_crypto_tree_hash::Path; use ic_ledger_canister_blocks_synchronizer::blocks::HashedBlock; use ic_ledger_core::block::BlockType; use ic_ledger_hash_of::HashOf; -use ic_types::messages::{HttpCanisterUpdate, HttpReadState}; -use ic_types::{CanisterId, PrincipalId}; +use ic_types::{ + messages::{HttpCanisterUpdate, HttpReadState}, + CanisterId, PrincipalId, +}; use icp_ledger::{ Block, BlockIndex, Operation as LedgerOperation, SendArgs, Subaccount, TimeStamp, Tokens, Transaction, }; use on_wire::{FromWire, IntoWire}; use rosetta_core::convert::principal_id_from_public_key; -use serde_json::map::Map; -use serde_json::{from_value, Number, Value}; +use serde_json::{from_value, map::Map, Number, Value}; use std::convert::{TryFrom, TryInto}; /// This module converts from ledger_canister data structures to Rosetta data @@ -270,7 +278,8 @@ pub fn operations_to_requests( } OperationType::ListNeurons => { validate_neuron_management_op()?; - state.list_neurons(account)?; + let ListNeuronsMetadata { page_number } = o.metadata.clone().try_into()?; + state.list_neurons(account, page_number)?; } OperationType::Burn | OperationType::Mint => { let msg = format!("Unsupported operation type: {:?}", 
o.type_); diff --git a/rs/rosetta-api/icp/src/convert/state.rs b/rs/rosetta-api/icp/src/convert/state.rs index f7cf42c169e..ab14dbb01a4 100644 --- a/rs/rosetta-api/icp/src/convert/state.rs +++ b/rs/rosetta-api/icp/src/convert/state.rs @@ -1,10 +1,12 @@ -use crate::errors::ApiError; -use crate::models::seconds::Seconds; -use crate::request::Request; -use crate::request_types::{ - AddHotKey, ChangeAutoStakeMaturity, Disburse, Follow, ListNeurons, MergeMaturity, NeuronInfo, - PublicKeyOrPrincipal, RefreshVotingPower, RegisterVote, RemoveHotKey, SetDissolveTimestamp, - Spawn, Stake, StakeMaturity, StartDissolve, StopDissolve, +use crate::{ + errors::ApiError, + models::seconds::Seconds, + request::Request, + request_types::{ + AddHotKey, ChangeAutoStakeMaturity, Disburse, Follow, ListNeurons, MergeMaturity, + NeuronInfo, PublicKeyOrPrincipal, RefreshVotingPower, RegisterVote, RemoveHotKey, + SetDissolveTimestamp, Spawn, Stake, StakeMaturity, StartDissolve, StopDissolve, + }, }; use ic_types::PrincipalId; use icp_ledger::{Operation, Tokens, DEFAULT_TRANSFER_FEE}; @@ -359,10 +361,16 @@ impl State { Ok(()) } - pub fn list_neurons(&mut self, account: icp_ledger::AccountIdentifier) -> Result<(), ApiError> { + pub fn list_neurons( + &mut self, + account: icp_ledger::AccountIdentifier, + page_number: Option, + ) -> Result<(), ApiError> { self.flush()?; - self.actions - .push(Request::ListNeurons(ListNeurons { account })); + self.actions.push(Request::ListNeurons(ListNeurons { + account, + page_number, + })); Ok(()) } diff --git a/rs/rosetta-api/icp/src/request.rs b/rs/rosetta-api/icp/src/request.rs index 021b0fb5c35..48e6c7c5267 100644 --- a/rs/rosetta-api/icp/src/request.rs +++ b/rs/rosetta-api/icp/src/request.rs @@ -144,7 +144,9 @@ impl Request { neuron_index: *neuron_index, controller: controller.map(PublicKeyOrPrincipal::Principal), }), - Request::ListNeurons(ListNeurons { .. }) => Ok(RequestType::ListNeurons), + Request::ListNeurons(ListNeurons { page_number, .. 
}) => Ok(RequestType::ListNeurons { + page_number: page_number.unwrap_or_default(), + }), Request::Follow(Follow { neuron_index, controller, @@ -478,7 +480,10 @@ impl TryFrom<&models::Request> for Request { Some(Err(e)) => Err(e), } } - RequestType::ListNeurons { .. } => Ok(Request::ListNeurons(ListNeurons { account })), + RequestType::ListNeurons { page_number } => Ok(Request::ListNeurons(ListNeurons { + account, + page_number: Some(*page_number), + })), RequestType::Follow { neuron_index, controller, diff --git a/rs/rosetta-api/icp/src/request_handler/construction_parse.rs b/rs/rosetta-api/icp/src/request_handler/construction_parse.rs index 8fe0b0e1bef..8eea124709c 100644 --- a/rs/rosetta-api/icp/src/request_handler/construction_parse.rs +++ b/rs/rosetta-api/icp/src/request_handler/construction_parse.rs @@ -97,7 +97,9 @@ impl RosettaRequestHandler { RequestType::StakeMaturity { neuron_index } => { stake_maturity(&mut requests, arg, from, neuron_index)? } - RequestType::ListNeurons => list_neurons(&mut requests, arg, from)?, + RequestType::ListNeurons { page_number } => { + list_neurons(&mut requests, arg, from, Some(page_number))? 
+ } RequestType::NeuronInfo { neuron_index, controller, @@ -550,8 +552,12 @@ fn list_neurons( requests: &mut Vec, _arg: Blob, from: AccountIdentifier, + page_number: Option, ) -> Result<(), ApiError> { - requests.push(Request::ListNeurons(ListNeurons { account: from })); + requests.push(Request::ListNeurons(ListNeurons { + account: from, + page_number, + })); Ok(()) } diff --git a/rs/rosetta-api/icp/src/request_handler/construction_payloads.rs b/rs/rosetta-api/icp/src/request_handler/construction_payloads.rs index adbbc337072..d0b23a0d082 100644 --- a/rs/rosetta-api/icp/src/request_handler/construction_payloads.rs +++ b/rs/rosetta-api/icp/src/request_handler/construction_payloads.rs @@ -435,8 +435,9 @@ fn handle_list_neurons( include_neurons_readable_by_caller: true, include_empty_neurons_readable_by_caller: None, include_public_neurons_in_full_neurons: None, - page_number: None, + page_number: req.page_number, page_size: None, + neuron_subaccounts: None, }; let update = HttpCanisterUpdate { canister_id: Blob(ic_nns_constants::GOVERNANCE_CANISTER_ID.get().to_vec()), @@ -453,7 +454,12 @@ fn handle_list_neurons( &update, SignatureType::from(pk.curve_type), ); - updates.push((RequestType::ListNeurons, update)); + updates.push(( + RequestType::ListNeurons { + page_number: req.page_number.unwrap_or_default(), + }, + update, + )); Ok(()) } diff --git a/rs/rosetta-api/icp/src/request_types.rs b/rs/rosetta-api/icp/src/request_types.rs index d46fc2daa90..cdb5ef88637 100644 --- a/rs/rosetta-api/icp/src/request_types.rs +++ b/rs/rosetta-api/icp/src/request_types.rs @@ -1,18 +1,19 @@ -use crate::models::amount::{signed_amount, tokens_to_amount}; -use crate::models::operation::OperationType; -use crate::models::seconds::Seconds; -use crate::models::OperationIdentifier; use crate::{ convert::to_model_account_identifier, errors::ApiError, - models::{self, Operation}, + models::{ + self, + amount::{signed_amount, tokens_to_amount}, + operation::OperationType, + seconds::Seconds, + 
Operation, OperationIdentifier, + }, transaction_id::TransactionIdentifier, }; pub use ic_ledger_canister_blocks_synchronizer::blocks::RosettaBlocksMode; use ic_types::PrincipalId; use icp_ledger::{AccountIdentifier, BlockIndex, Operation as LedgerOperation, Tokens}; -use rosetta_core::convert::principal_id_from_public_key; -use rosetta_core::objects::ObjectMap; +use rosetta_core::{convert::principal_id_from_public_key, objects::ObjectMap}; use serde::{Deserialize, Serialize}; use serde_json::Value; use std::convert::TryFrom; @@ -96,7 +97,7 @@ pub enum RequestType { }, #[serde(rename = "LIST_NEURONS")] #[serde(alias = "ListNeurons")] - ListNeurons, + ListNeurons { page_number: u64 }, #[serde(rename = "FOLLOW")] #[serde(alias = "Follow")] Follow { @@ -370,6 +371,7 @@ pub struct NeuronInfo { #[derive(Clone, Eq, PartialEq, Debug, Deserialize, Serialize)] pub struct ListNeurons { pub account: icp_ledger::AccountIdentifier, + pub page_number: Option, } #[derive(Clone, Eq, PartialEq, Debug, Deserialize, Serialize)] @@ -855,6 +857,34 @@ impl TryFrom for ObjectMap { } } +#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Deserialize, Serialize)] +pub struct ListNeuronsMetadata { + pub page_number: Option, +} + +impl TryFrom> for ListNeuronsMetadata { + type Error = ApiError; + fn try_from(o: Option) -> Result { + serde_json::from_value(serde_json::Value::Object(o.unwrap_or_default())).map_err(|e| { + ApiError::internal_error(format!( + "Could not parse LIST_NEURONS operation metadata from metadata JSON object: {}", + e + )) + }) + } +} + +impl TryFrom for ObjectMap { + type Error = ApiError; + fn try_from(d: ListNeuronsMetadata) -> Result { + match serde_json::to_value(d) { + Ok(Value::Object(o)) => Ok(o), + Ok(o) => Err(ApiError::internal_error(format!("Could not convert ListNeuronsMetadata to ObjectMap. 
Expected type Object but received: {:?}",o))), + Err(err) => Err(ApiError::internal_error(format!("Could not convert ListNeuronsMetadata to ObjectMap: {:?}",err))), + } + } +} + #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Debug, Deserialize, Serialize)] pub struct FollowMetadata { pub topic: i32, @@ -1391,7 +1421,10 @@ impl TransactionBuilder { } pub fn list_neurons(&mut self, req: &ListNeurons) -> Result<(), ApiError> { - let ListNeurons { account } = req; + let ListNeurons { + account, + page_number, + } = req; let operation_identifier = self.allocate_op_id(); self.ops.push(Operation { operation_identifier, @@ -1401,7 +1434,12 @@ impl TransactionBuilder { amount: None, related_operations: None, coin_change: None, - metadata: None, + metadata: Some( + ListNeuronsMetadata { + page_number: *page_number, + } + .try_into()?, + ), }); Ok(()) } diff --git a/rs/rosetta-api/icp/tests/system_tests/common/utils.rs b/rs/rosetta-api/icp/tests/system_tests/common/utils.rs index ed7e116a6f0..9117d003937 100644 --- a/rs/rosetta-api/icp/tests/system_tests/common/utils.rs +++ b/rs/rosetta-api/icp/tests/system_tests/common/utils.rs @@ -182,6 +182,7 @@ pub async fn list_neurons(agent: &Agent) -> ListNeuronsResponse { include_public_neurons_in_full_neurons: None, page_number: None, page_size: None, + neuron_subaccounts: None }) .unwrap() ) diff --git a/rs/sns/cli/src/lib.rs b/rs/sns/cli/src/lib.rs index 789e1c1cb5a..d4bb871ab0c 100644 --- a/rs/sns/cli/src/lib.rs +++ b/rs/sns/cli/src/lib.rs @@ -28,6 +28,7 @@ use std::{ sync::Once, }; use tempfile::NamedTempFile; +use upgrade_sns_controlled_canister::RefundAfterSnsControlledCanisterUpgradeArgs; pub mod deploy; pub mod health; pub mod init_config_file; @@ -127,6 +128,8 @@ pub enum SubCommand { /// Uploads a given Wasm to a (newly deployed) store canister and submits a proposal to upgrade /// using that Wasm. 
UpgradeSnsControlledCanister(UpgradeSnsControlledCanisterArgs), + /// Attempts to refund the unused cycles after an SNS-controlled canister has been upgraded. + RefundAfterSnsControlledCanisterUpgrade(RefundAfterSnsControlledCanisterUpgradeArgs), } impl CliArgs { diff --git a/rs/sns/cli/src/main.rs b/rs/sns/cli/src/main.rs index 2637deea223..3e10b9a667a 100644 --- a/rs/sns/cli/src/main.rs +++ b/rs/sns/cli/src/main.rs @@ -37,5 +37,13 @@ async fn main() -> Result<()> { } } } + SubCommand::RefundAfterSnsControlledCanisterUpgrade(args) => { + match upgrade_sns_controlled_canister::refund(args, &agent).await { + Ok(_) => Ok(()), + Err(err) => { + bail!("{}", err); + } + } + } } } diff --git a/rs/sns/cli/src/upgrade_sns_controlled_canister.rs b/rs/sns/cli/src/upgrade_sns_controlled_canister.rs index 8bd55cafeb7..645a8fad79b 100644 --- a/rs/sns/cli/src/upgrade_sns_controlled_canister.rs +++ b/rs/sns/cli/src/upgrade_sns_controlled_canister.rs @@ -6,17 +6,21 @@ use candid_utils::{ validation::{encode_upgrade_args, encode_upgrade_args_without_service}, }; use clap::Parser; +use core::convert::From; use cycles_minting_canister::{CanisterSettingsArgs, CreateCanister, SubnetSelection}; use ic_base_types::{CanisterId, PrincipalId}; use ic_management_canister_types::{BoundedVec, CanisterInstallMode}; use ic_nervous_system_agent::{ - management_canister, nns, - sns::{self, governance::SubmittedProposal, root::SnsCanisters}, + management_canister::{self, delete_canister, stop_canister}, + nns, + sns::{self, governance::SubmittedProposal, root::SnsCanisters, Sns}, CallCanisters, CanisterInfo, Request, }; use ic_nns_constants::CYCLES_LEDGER_CANISTER_ID; use ic_sns_governance_api::pb::v1::{ - proposal::Action, ChunkedCanisterWasm, Proposal, ProposalId, UpgradeSnsControlledCanister, + get_proposal_response, + proposal::{self, Action}, + ChunkedCanisterWasm, Proposal, ProposalData, ProposalId, UpgradeSnsControlledCanister, }; use ic_wasm::{metadata, utils::parse_wasm}; use 
itertools::{Either, Itertools}; @@ -36,7 +40,7 @@ const GZIPPED_WASM_HEADER: [u8; 3] = [0x1f, 0x8b, 0x08]; // The cycle fee for create request is 0.1T cycles. pub const CANISTER_CREATE_FEE: u128 = 100_000_000_000_u128; -pub const STORE_CANISTER_INITIAL_CYCLES_BALANCE: u128 = 5_000_000_000_000_u128; // 5T +pub const STORE_CANISTER_INITIAL_CYCLES_BALANCE: u128 = 1_000_000_000_000_u128; // 1T /// The arguments used to configure the upgrade_sns_controlled_canister command. #[derive(Debug, Parser)] @@ -68,6 +72,18 @@ pub struct UpgradeSnsControlledCanisterArgs { pub summary: String, } +/// The arguments used to configure the refund_after_sns_controlled_canister_upgrade command. +#[derive(Debug, Parser)] +pub struct RefundAfterSnsControlledCanisterUpgradeArgs { + /// ID of the target canister that was to be upgraded using upgrade-sns-controlled-canister. + #[clap(long)] + pub target_canister_id: CanisterId, + + /// ID of the proposal created by upgrade-sns-controlled-canister. Must be + #[clap(long)] + pub proposal_id: u64, +} + pub struct Wasm { path: PathBuf, bytes: Vec, @@ -255,6 +271,74 @@ pub struct UpgradeSnsControlledCanisterInfo { pub proposal_id: Option, } +pub async fn find_sns( + agent: &C, + caller_principal: PrincipalId, + target_canister_id: CanisterId, + target_controllers: BTreeSet, +) -> Result, UpgradeSnsControlledCanisterError> { + let (user_controllers, canister_controllers): (Vec<_>, Vec<_>) = + target_controllers.into_iter().partition_map(|controller| { + if controller.is_self_authenticating() { + Either::Left(controller) + } else { + Either::Right(controller) + } + }); + + if user_controllers.contains(&caller_principal) { + println!( + "\n\ + ⚠️ the target is controlled by the caller, which means it is not decentralized.\n\ + ⚠️ Proceed upgrading it directly." + ); + + Ok(None) + } else { + assert!( + !canister_controllers.is_empty(), + "The target canister is not controlled by an SNS." 
+ ); + + assert_eq!( + canister_controllers.len(), + 1, + "The target canister has more than one canister controller!" + ); + + let canister_id = *canister_controllers.first().unwrap(); + + // TODO: Check that this is indeed an SNS canister controlling the target. + // + // This is expected to be `None` if we're running against a local replica. + // let root_subnet = nns::registry::get_subnet_for_canister(agent, canister_id) + // .await + // .ok(); + // let sns_subnets = sns_w.list_sns_subnets().await.unwrap(); + // assert!( + // sns_subnets.contains(&root_subnet), + // "Target canister is not controlled by an SNS!", + // ); + + let root_canister = sns::root::RootCanister { canister_id }; + + let response = root_canister.list_sns_canisters(agent).await?; + let SnsCanisters { sns, dapps } = + SnsCanisters::try_from(response).map_err(UpgradeSnsControlledCanisterError::Client)?; + + // Check that the target is indeed controlled by this SNS. + if !BTreeSet::from_iter(&dapps[..]).contains(&target_canister_id.get()) { + return Err(UpgradeSnsControlledCanisterError::Client(format!( + "{} is not one of the canisters controlled by the SNS with Root canister {}", + target_canister_id.get(), + root_canister.canister_id, + ))); + } + + Ok(Some(sns)) + } +} + pub async fn exec( args: UpgradeSnsControlledCanisterArgs, agent: &C, @@ -291,68 +375,13 @@ pub async fn exec( print!("Finding the SNS controlling this target canister ... "); std::io::stdout().flush().unwrap(); - let sns = { - let (user_controllers, canister_controllers): (Vec<_>, Vec<_>) = - target_controllers.into_iter().partition_map(|controller| { - if controller.is_self_authenticating() { - Either::Left(controller) - } else { - Either::Right(controller) - } - }); - - if user_controllers.contains(&caller_principal) { - println!( - "\n\ - ⚠️ the target is controlled by the caller, which means it is not decentralized.\n\ - ⚠️ Proceed upgrading it directly." 
- ); - - None // no SNS - } else { - assert!( - !canister_controllers.is_empty(), - "The target canister is not controlled by an SNS." - ); - - assert_eq!( - canister_controllers.len(), - 1, - "The target canister has more than one canister controller!" - ); - - let canister_id = *canister_controllers.first().unwrap(); - - // TODO: Check that this is indeed an SNS canister controlling the target. - // - // This is expected to be `None` if we're running against a local replica. - // let root_subnet = nns::registry::get_subnet_for_canister(agent, canister_id) - // .await - // .ok(); - // let sns_subnets = sns_w.list_sns_subnets().await.unwrap(); - // assert!( - // sns_subnets.contains(&root_subnet), - // "Target canister is not controlled by an SNS!", - // ); - - let root_canister = sns::root::RootCanister { canister_id }; - - let response = root_canister.list_sns_canisters(agent).await?; - let SnsCanisters { sns, dapps } = SnsCanisters::try_from(response) - .map_err(UpgradeSnsControlledCanisterError::Client)?; - - // Check that the target is indeed controlled by this SNS. - if !BTreeSet::from_iter(&dapps[..]).contains(&target_canister_id.get()) { - return Err(UpgradeSnsControlledCanisterError::Client(format!( - "{} is not one of the canisters controlled by the SNS with Root canister {}", - target_canister_id.get(), - root_canister.canister_id, - ))); - } - - Some(sns) - } - }; + let sns = find_sns( + agent, + caller_principal, + target_canister_id, + target_controllers, + ) + .await?; println!("✔️"); print!("Checking that we have a viable Wasm for this upgrade ... 
"); @@ -482,6 +511,138 @@ pub async fn exec( }) } +pub async fn refund( + args: RefundAfterSnsControlledCanisterUpgradeArgs, + agent: &C, +) -> Result<(), UpgradeSnsControlledCanisterError> { + let RefundAfterSnsControlledCanisterUpgradeArgs { + target_canister_id, + proposal_id, + } = args; + + let proposal_id = ProposalId { id: proposal_id }; + + let caller_principal = PrincipalId(agent.caller()?); + + print!("Getting target canister info ... "); + std::io::stdout().flush().unwrap(); + let CanisterInfo { + controllers: target_controllers, + module_hash: _, + } = agent + .canister_info(target_canister_id) + .await + .map_err(|err| { + UpgradeSnsControlledCanisterError::Client(format!( + "Cannot refund cycles, target canister {} does not seem to be installed: {}", + target_canister_id.get(), + err + )) + })?; + println!("✔️"); + + let target_controllers = target_controllers + .into_iter() + .map(PrincipalId) + .collect::>(); + + print!("Finding the SNS controlling this target canister ... "); + std::io::stdout().flush().unwrap(); + let sns = find_sns( + agent, + caller_principal, + target_canister_id, + target_controllers, + ) + .await?; + let sns = match sns { + Some(sns) => sns, + None => { + return Err(UpgradeSnsControlledCanisterError::Client(format!( + "Cannot find SNS controlling target canister {}", + target_canister_id.get() + ))); + } + }; + println!("✔️"); + + print!("Fetching the store canister ID ... "); + std::io::stdout().flush().unwrap(); + let sns_governance = sns::governance::GovernanceCanister { + canister_id: sns.governance.canister_id, + }; + let get_proposal_result = sns_governance + .get_proposal(agent, proposal_id) + .await + .map_err(UpgradeSnsControlledCanisterError::Agent)? 
+ .result + .ok_or("Missing GetProposalResponse.result") + .map_err(|err| UpgradeSnsControlledCanisterError::Client(err.to_string()))?; + + let proposal = match get_proposal_result { + get_proposal_response::Result::Error(err) => { + return Err(UpgradeSnsControlledCanisterError::Client(format!( + "{err:?}" + ))); + } + get_proposal_response::Result::Proposal(ProposalData { + proposal, + executed_timestamp_seconds, + failed_timestamp_seconds, + .. + }) if executed_timestamp_seconds > 0 || failed_timestamp_seconds > 0 => proposal, + get_proposal_response::Result::Proposal(_) => { + return Err(UpgradeSnsControlledCanisterError::Client(format!( + "Proposal {} is still live; please repeat this command after it executes or fails.", + proposal_id.id, + ))); + } + }; + + let store_canister_id = match proposal { + Some(Proposal { + action: + Some(proposal::Action::UpgradeSnsControlledCanister(UpgradeSnsControlledCanister { + chunked_canister_wasm: + Some(ChunkedCanisterWasm { + store_canister_id: Some(store_canister_id), + .. + }), + .. + })), + .. + }) => store_canister_id, + _ => { + return Err(UpgradeSnsControlledCanisterError::Client(format!( + "Proposal {} is invalid or does not correspond to an SNS-controlled canister upgrade + with chunked Wasm.", + proposal_id.id, + ))); + } + }; + println!("✔️"); + + // TODO: Implement the actual reimbursement. + + print!("Deleting the store canister {} ... 
", store_canister_id); + std::io::stdout().flush().unwrap(); + stop_canister( + agent, + CanisterId::unchecked_from_principal(store_canister_id), + ) + .await + .map_err(UpgradeSnsControlledCanisterError::Agent)?; + delete_canister( + agent, + CanisterId::unchecked_from_principal(store_canister_id), + ) + .await + .map_err(UpgradeSnsControlledCanisterError::Agent)?; + println!("✔️"); + + Ok(()) +} + pub type BlockIndex = Nat; #[derive(CandidType, Deserialize, Debug, Clone)] diff --git a/rs/sns/governance/CHANGELOG.md b/rs/sns/governance/CHANGELOG.md index 51b7b4e0ad0..ead8027e793 100644 --- a/rs/sns/governance/CHANGELOG.md +++ b/rs/sns/governance/CHANGELOG.md @@ -10,6 +10,46 @@ here were moved from the adjacent `unreleased_changelog.md` file. INSERT NEW RELEASES HERE + +# 2025-02-03: Proposal 135067 + +http://dashboard.internetcomputer.org/proposal/135067 + +# 2025-01-27: Proposal 134989 + +https://dashboard.internetcomputer.org/proposal/134989 + +## Added + +* Enable SNSs to opt in for +[automatically advancing its target version](https://forum.dfinity.org/t/proposal-opt-in-mechanism-for-automatic-sns-target-version-advancement/39874) +to the newest version blessed by the NNS. 
To do so, please submit a `ManageNervousSystemParameters` +proposal, e.g.: + + ```bash + dfx canister --network ic call $SNS_GOVERNANCE_CANISTER_ID manage_neuron '( + record { + subaccount = blob "'${PROPOSER_SNS_NEURON_SUBACCOUNT}'"; + command = opt variant { + MakeProposal = record { + url = "https://forum.dfinity.org/t/proposal-opt-in-mechanism-for-automatic-sns-target-version-advancement/39874"; + title = "Opt for automatic advancement of SNS target versions"; + action = opt variant { + ManageNervousSystemParameters = record { + automatically_advance_target_version = opt true; + } + }; + summary = "Enable automatically advancing the target version \ + of this SNS to speed up the delivery of SNS framework \ + upgrades that were already blessed by the NNS."; + } + }; + }, + )' + ``` + +* Do not redact chunked Wasm data in `ProposalInfo` served from `SnsGov.list_proposals`. + * https://nns.ic0.app/proposal/?proposal=134906 Enable upgrading SNS-controlled canisters using chunked WASMs. This is implemented as an extension diff --git a/rs/sns/governance/api/src/ic_sns_governance.pb.v1.rs b/rs/sns/governance/api/src/ic_sns_governance.pb.v1.rs index 243ef08e4b9..13710a831b9 100644 --- a/rs/sns/governance/api/src/ic_sns_governance.pb.v1.rs +++ b/rs/sns/governance/api/src/ic_sns_governance.pb.v1.rs @@ -303,7 +303,9 @@ pub struct UpgradeSnsControlledCanister { /// Arguments passed to the post-upgrade method of the new wasm module. #[serde(deserialize_with = "ic_utils::deserialize::deserialize_option_blob")] pub canister_upgrade_arg: Option>, - /// Canister install_code mode. + /// Canister install_code mode. If specified, the integer value corresponds to + /// `ic_protobuf::types::v1::v1CanisterInstallMode` or `canister_install_mode` + /// (as per https://internetcomputer.org/docs/current/references/ic-interface-spec#ic-candid). 
pub mode: Option, /// If the entire WASM does not fit into the 2 MiB ingress limit, then `new_canister_wasm` should be /// an empty, and this field should be set instead. diff --git a/rs/sns/governance/src/types.rs b/rs/sns/governance/src/types.rs index 6996a4a29e6..14e823b0996 100644 --- a/rs/sns/governance/src/types.rs +++ b/rs/sns/governance/src/types.rs @@ -1914,16 +1914,18 @@ impl From for LedgerUpgradeArgs { token_symbol, token_logo, } = manage_ledger_parameters; - let metadata = [("icrc1:logo", token_logo.map(MetadataValue::Text))] - .into_iter() - .filter_map(|(k, v)| v.map(|v| (k.to_string(), v))) - .collect(); + + let metadata = token_logo.map(|token_logo| { + let key = "icrc1:logo".to_string(); + let value = MetadataValue::Text(token_logo); + vec![(key, value)] + }); LedgerUpgradeArgs { transfer_fee: transfer_fee.map(|tf| tf.into()), token_name, token_symbol, - metadata: Some(metadata), + metadata, ..LedgerUpgradeArgs::default() } } diff --git a/rs/sns/governance/src/types/tests.rs b/rs/sns/governance/src/types/tests.rs index 68d731cc512..670a6f8ca9d 100644 --- a/rs/sns/governance/src/types/tests.rs +++ b/rs/sns/governance/src/types/tests.rs @@ -6,6 +6,7 @@ use crate::pb::v1::{ neuron::Followees, ExecuteGenericNervousSystemFunction, Proposal, ProposalData, VotingRewardsParameters, }; +use candid::Nat; use futures::FutureExt; use ic_base_types::PrincipalId; use ic_management_canister_types::ChunkHash; @@ -1492,3 +1493,58 @@ fn test_validate_chunked_wasm_management_canister_call_returns_junk() { )]), ); } + +#[test] +fn test_from_manage_ledger_parameters_into_ledger_upgrade_args() { + let manage_ledger_parameters = ManageLedgerParameters { + transfer_fee: Some(111), + token_name: Some("abc".to_string()), + token_symbol: Some("xyz".to_string()), + token_logo: Some("".to_string()), + }; + + let observed = LedgerUpgradeArgs::from(manage_ledger_parameters); + + assert_eq!( + observed, + LedgerUpgradeArgs { + metadata: Some(vec![( + "icrc1:logo".to_string(), + 
MetadataValue::Text("".to_string()) + )]), + token_name: Some("abc".to_string()), + token_symbol: Some("xyz".to_string()), + transfer_fee: Some(Nat::from(111_u64)), + change_fee_collector: None, + max_memo_length: None, + feature_flags: None, + change_archive_options: None, + } + ); +} + +#[test] +fn test_from_manage_ledger_parameters_into_ledger_upgrade_args_no_logo() { + let manage_ledger_parameters = ManageLedgerParameters { + transfer_fee: Some(111), + token_name: Some("abc".to_string()), + token_symbol: Some("xyz".to_string()), + token_logo: None, + }; + + let observed = LedgerUpgradeArgs::from(manage_ledger_parameters); + + assert_eq!( + observed, + LedgerUpgradeArgs { + metadata: None, + token_name: Some("abc".to_string()), + token_symbol: Some("xyz".to_string()), + transfer_fee: Some(Nat::from(111_u64)), + change_fee_collector: None, + max_memo_length: None, + feature_flags: None, + change_archive_options: None, + } + ); +} diff --git a/rs/sns/governance/unreleased_changelog.md b/rs/sns/governance/unreleased_changelog.md index 775513d903e..6aab9b95dc8 100644 --- a/rs/sns/governance/unreleased_changelog.md +++ b/rs/sns/governance/unreleased_changelog.md @@ -9,35 +9,9 @@ on the process that this file is part of, see ## Added -* Enable SNSs to opt in for -[automatically advancing its target version](https://forum.dfinity.org/t/proposal-opt-in-mechanism-for-automatic-sns-target-version-advancement/39874) -to the newest version blessed by the NNS. 
To do so, please submit a `ManageNervousSystemParameters` -proposal, e.g.: - - ```bash - dfx canister --network ic call $SNS_GOVERNANCE_CANISTER_ID manage_neuron '( - record { - subaccount = blob "'${PROPOSER_SNS_NEURON_SUBACCOUNT}'"; - command = opt variant { - MakeProposal = record { - url = "https://forum.dfinity.org/t/proposal-opt-in-mechanism-for-automatic-sns-target-version-advancement/39874"; - title = "Opt for automatic advancement of SNS target versions"; - action = opt variant { - ManageNervousSystemParameters = record { - automatically_advance_target_version = opt true; - } - }; - summary = "Enable automatically advancing the target version \ - of this SNS to speed up the delivery of SNS framework \ - upgrades that were already blessed by the NNS."; - } - }; - }, - )' - ``` - -* Do not redact chunked Wasm data in `ProposalInfo` served from `SnsGov.list_proposals`. * Added the `query_stats` field for `canister_status`/`get_sns_canisters_summary` methods. +* Fix a bug due to which SNS ledger logos were sometimes unset after changing unrelated + SNS ledger metadata fields. ## Changed diff --git a/rs/sns/init/src/lib.rs b/rs/sns/init/src/lib.rs index d7396908408..77bcde55220 100644 --- a/rs/sns/init/src/lib.rs +++ b/rs/sns/init/src/lib.rs @@ -538,6 +538,17 @@ impl SnsInitPayload { }) } + pub fn stringify_without_logos(&self) -> Result { + let redacted_logo = "".to_string(); + let self_without_logos = Self { + logo: Some(redacted_logo.clone()), + token_logo: Some(redacted_logo), + ..self.clone() + }; + serde_yaml::to_string(&self_without_logos) + .map_err(|e| format!("Could not create initialization parameters {}", e)) + } + /// Construct the params used to initialize a SNS Governance canister. 
fn governance_init_args( &self, @@ -557,8 +568,7 @@ impl SnsInitPayload { governance.neurons = self.get_initial_neurons(¶meters)?; - governance.sns_initialization_parameters = serde_yaml::to_string(self) - .map_err(|e| format!("Could not create initialization parameters {}", e))?; + governance.sns_initialization_parameters = self.stringify_without_logos()?; Ok(governance) } @@ -2184,6 +2194,90 @@ mod test { ); } + #[test] + fn stringify_without_logos() { + let sns_init_payload = SnsInitPayload { + token_name: Some("ServiceNervousSystem Coin".to_string()), + token_symbol: Some("SNS".to_string()), + ..SnsInitPayload::with_valid_values_for_testing_post_execution() + }; + + let observed = sns_init_payload.stringify_without_logos(); + + let expected = "transaction_fee_e8s: 10000 +token_name: ServiceNervousSystem Coin +token_symbol: SNS +proposal_reject_cost_e8s: 100000000 +neuron_minimum_stake_e8s: 100000000 +fallback_controller_principal_ids: +- kflrj-iv6cy-aaaaa-aaaap-4ai +logo: +url: https://internetcomputer.org/ +name: ServiceNervousSystemTest +description: Description of an SNS Project +neuron_minimum_dissolve_delay_to_vote_seconds: 15778800 +initial_reward_rate_basis_points: 0 +final_reward_rate_basis_points: 0 +reward_rate_transition_duration_seconds: 0 +max_dissolve_delay_seconds: 252460800 +max_neuron_age_seconds_for_age_bonus: 126230400 +max_dissolve_delay_bonus_percentage: 100 +max_age_bonus_percentage: 25 +initial_voting_period_seconds: 345600 +wait_for_quiet_deadline_increase_seconds: 86400 +confirmation_text: null +restricted_countries: + iso_codes: + - CH +dapp_canisters: + canisters: + - id: hdjeo-vyaaa-aaaaa-aapua-cai +min_participants: 5 +min_icp_e8s: null +max_icp_e8s: null +min_direct_participation_icp_e8s: 12300000000 +max_direct_participation_icp_e8s: 65000000000 +min_participant_icp_e8s: 6500000000 +max_participant_icp_e8s: 65000000000 +swap_start_timestamp_seconds: 10000000 +swap_due_timestamp_seconds: 10086400 
+neuron_basket_construction_parameters: + count: 5 + dissolve_delay_interval_seconds: 10001 +nns_proposal_id: 10 +neurons_fund_participation: true +token_logo: +neurons_fund_participation_constraints: + min_direct_participation_threshold_icp_e8s: 12300000000 + max_neurons_fund_participation_icp_e8s: 65000000000 + coefficient_intervals: + - from_direct_participation_icp_e8s: 0 + to_direct_participation_icp_e8s: 18446744073709551615 + slope_numerator: 1 + slope_denominator: 1 + intercept_icp_e8s: 0 + ideal_matched_participation_function: + serialized_representation: '{\"t_1\":\"33300.000000000\",\"t_2\":\"99900.000000000\",\"t_3\":\"166500.000000000\",\"t_4\":\"200000.0000000000\",\"cap\":\"100000.000000000\"}' +initial_token_distribution: !FractionalDeveloperVotingPower + developer_distribution: + developer_neurons: + - controller: 6fyp7-3ibaa-aaaaa-aaaap-4ai + stake_e8s: 100000000 + memo: 0 + dissolve_delay_seconds: 15778800 + vesting_period_seconds: null + treasury_distribution: + total_e8s: 500000000 + swap_distribution: + total_e8s: 10000000000 + initial_swap_amount_e8s: 10000000000 + airdrop_distribution: + airdrop_neurons: [] +".to_string(); + + assert_eq!(observed, Ok(expected)); + } + #[test] fn test_governance_init_args_has_generated_config() { // Build an sns_init_payload with defaults for non-governance related configuration. @@ -2212,12 +2306,14 @@ mod test { let governance = canister_payloads.governance; - // Assert that the Governance canister's params match the SnsInitPayload + // Assert that the init params match the SnsInitPayload (modulo logos). assert_eq!( - serde_yaml::from_str::(&governance.sns_initialization_parameters) - .unwrap(), - sns_init_payload + governance.sns_initialization_parameters, + sns_init_payload.stringify_without_logos().unwrap() ); + + // Assert that the init params can be deserialized. 
+ serde_yaml::from_str::(&governance.sns_initialization_parameters).unwrap(); } #[test] diff --git a/rs/sns/integration_tests/src/governance.rs b/rs/sns/integration_tests/src/governance.rs index 4f57cfd8e06..7540df003fb 100644 --- a/rs/sns/integration_tests/src/governance.rs +++ b/rs/sns/integration_tests/src/governance.rs @@ -37,7 +37,8 @@ fn test_sns_initialization_parameters_are_set() { .await .expect("Error calling get_sns_initialization_parameters api"); - let expected_initialization_parameters = serde_yaml::to_string(&sns_init_payload).unwrap(); + let expected_initialization_parameters = + sns_init_payload.stringify_without_logos().unwrap(); assert_eq!( get_sns_initialization_parameters_response.sns_initialization_parameters, diff --git a/rs/sns/root/CHANGELOG.md b/rs/sns/root/CHANGELOG.md index 6062774a801..bf0c5d25da8 100644 --- a/rs/sns/root/CHANGELOG.md +++ b/rs/sns/root/CHANGELOG.md @@ -11,9 +11,20 @@ here were moved from the adjacent `unreleased_changelog.md` file. INSERT NEW RELEASES HERE +# 2025-02-03: Proposal 135066 + +http://dashboard.internetcomputer.org/proposal/135066 + +## Changed + +- The `LogVisibility` returned from `canister_status` has one more variant `allowed_viewers`, + consistent with the corresponding management canister API. Calling `canister_status` for a + canister with such a log visibility setting will no longer panic. + + # 2025-01-20: Proposal 134905 -http://dashboard.internetcomputer.org/proposals/134905 +http://dashboard.internetcomputer.org/proposal/134905 ## Added diff --git a/rs/sns/root/unreleased_changelog.md b/rs/sns/root/unreleased_changelog.md index 2dde54ef615..f3ee78e104e 100644 --- a/rs/sns/root/unreleased_changelog.md +++ b/rs/sns/root/unreleased_changelog.md @@ -13,10 +13,6 @@ on the process that this file is part of, see ## Changed -* The `LogVisibility` returned from `canister_status` has one more variant `allowed_viewers`, - consistent with the corresponding management canister API. 
Calling `canister_status` for a - canister with such a log visibility setting will no longer panic. - ## Deprecated ## Removed diff --git a/rs/tests/driver/src/canister_api.rs b/rs/tests/driver/src/canister_api.rs index d575641426a..1e1d4f3e1b1 100644 --- a/rs/tests/driver/src/canister_api.rs +++ b/rs/tests/driver/src/canister_api.rs @@ -740,6 +740,7 @@ impl ListNnsNeuronsRequest { include_public_neurons_in_full_neurons: None, page_number: None, page_size: None, + neuron_subaccounts: None, }, } }