diff --git a/Cargo.lock b/Cargo.lock index 09d637717..f08f1ca0b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -175,14 +175,41 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4436e0292ab1bb631b42973c61205e704475fe8126af845c8d923c0996328127" +[[package]] +name = "anchor-attribute-access-control" +version = "0.24.2" +source = "git+https://github.com/ComposableFi/mantis-solana?rev=d92c52575aaf430ca1f06bc2589f26b0fedde7bf#d92c52575aaf430ca1f06bc2589f26b0fedde7bf" +dependencies = [ + "anchor-syn 0.24.2", + "anyhow", + "proc-macro2", + "quote", + "regex", + "syn 1.0.109", +] + [[package]] name = "anchor-attribute-access-control" version = "0.29.0" source = "git+https://github.com/dhruvja/anchor#90a3008fcbbc5bcbc704cd6cccf61ef130c5f9eb" dependencies = [ - "anchor-syn", + "anchor-syn 0.29.0", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "anchor-attribute-account" +version = "0.24.2" +source = "git+https://github.com/ComposableFi/mantis-solana?rev=d92c52575aaf430ca1f06bc2589f26b0fedde7bf#d92c52575aaf430ca1f06bc2589f26b0fedde7bf" +dependencies = [ + "anchor-syn 0.24.2", + "anyhow", + "bs58 0.4.0", "proc-macro2", "quote", + "rustversion", "syn 1.0.109", ] @@ -191,19 +218,40 @@ name = "anchor-attribute-account" version = "0.29.0" source = "git+https://github.com/dhruvja/anchor#90a3008fcbbc5bcbc704cd6cccf61ef130c5f9eb" dependencies = [ - "anchor-syn", + "anchor-syn 0.29.0", "bs58 0.5.1", "proc-macro2", "quote", "syn 1.0.109", ] +[[package]] +name = "anchor-attribute-constant" +version = "0.24.2" +source = "git+https://github.com/ComposableFi/mantis-solana?rev=d92c52575aaf430ca1f06bc2589f26b0fedde7bf#d92c52575aaf430ca1f06bc2589f26b0fedde7bf" +dependencies = [ + "anchor-syn 0.24.2", + "proc-macro2", + "syn 1.0.109", +] + [[package]] name = "anchor-attribute-constant" version = "0.29.0" source = "git+https://github.com/dhruvja/anchor#90a3008fcbbc5bcbc704cd6cccf61ef130c5f9eb" dependencies = [ - "anchor-syn", + "anchor-syn 0.29.0", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "anchor-attribute-error" +version = "0.24.2" +source = "git+https://github.com/ComposableFi/mantis-solana?rev=d92c52575aaf430ca1f06bc2589f26b0fedde7bf#d92c52575aaf430ca1f06bc2589f26b0fedde7bf" +dependencies = [ + "anchor-syn 0.24.2", + "proc-macro2", "quote", "syn 1.0.109", ] @@ -213,7 +261,19 @@ name = "anchor-attribute-error" version = "0.29.0" source = "git+https://github.com/dhruvja/anchor#90a3008fcbbc5bcbc704cd6cccf61ef130c5f9eb" dependencies = [ - "anchor-syn", + "anchor-syn 0.29.0", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "anchor-attribute-event" +version = "0.24.2" +source = "git+https://github.com/ComposableFi/mantis-solana?rev=d92c52575aaf430ca1f06bc2589f26b0fedde7bf#d92c52575aaf430ca1f06bc2589f26b0fedde7bf" +dependencies = [ + "anchor-syn 0.24.2", + "anyhow", + "proc-macro2", "quote", "syn 1.0.109", ] @@ -223,7 +283,32 @@ name = "anchor-attribute-event" version = "0.29.0" source = "git+https://github.com/dhruvja/anchor#90a3008fcbbc5bcbc704cd6cccf61ef130c5f9eb" dependencies = [ - "anchor-syn", + "anchor-syn 0.29.0", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "anchor-attribute-interface" +version = "0.24.2" +source = "git+https://github.com/ComposableFi/mantis-solana?rev=d92c52575aaf430ca1f06bc2589f26b0fedde7bf#d92c52575aaf430ca1f06bc2589f26b0fedde7bf" +dependencies = [ + "anchor-syn 0.24.2", + "anyhow", + "heck 0.3.3", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = 
"anchor-attribute-program" +version = "0.24.2" +source = "git+https://github.com/ComposableFi/mantis-solana?rev=d92c52575aaf430ca1f06bc2589f26b0fedde7bf#d92c52575aaf430ca1f06bc2589f26b0fedde7bf" +dependencies = [ + "anchor-syn 0.24.2", + "anyhow", "proc-macro2", "quote", "syn 1.0.109", @@ -234,7 +319,19 @@ name = "anchor-attribute-program" version = "0.29.0" source = "git+https://github.com/dhruvja/anchor#90a3008fcbbc5bcbc704cd6cccf61ef130c5f9eb" dependencies = [ - "anchor-syn", + "anchor-syn 0.29.0", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "anchor-attribute-state" +version = "0.24.2" +source = "git+https://github.com/ComposableFi/mantis-solana?rev=d92c52575aaf430ca1f06bc2589f26b0fedde7bf#d92c52575aaf430ca1f06bc2589f26b0fedde7bf" +dependencies = [ + "anchor-syn 0.24.2", + "anyhow", + "proc-macro2", "quote", "syn 1.0.109", ] @@ -244,7 +341,7 @@ name = "anchor-client" version = "0.29.0" source = "git+https://github.com/dhruvja/anchor#90a3008fcbbc5bcbc704cd6cccf61ef130c5f9eb" dependencies = [ - "anchor-lang", + "anchor-lang 0.29.0", "anyhow", "bytemuck", "futures", @@ -252,18 +349,30 @@ dependencies = [ "serde", "solana-account-decoder", "solana-client", - "solana-sdk", + "solana-sdk 1.17.30", "thiserror", "tokio", "url", ] +[[package]] +name = "anchor-derive-accounts" +version = "0.24.2" +source = "git+https://github.com/ComposableFi/mantis-solana?rev=d92c52575aaf430ca1f06bc2589f26b0fedde7bf#d92c52575aaf430ca1f06bc2589f26b0fedde7bf" +dependencies = [ + "anchor-syn 0.24.2", + "anyhow", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "anchor-derive-accounts" version = "0.29.0" source = "git+https://github.com/dhruvja/anchor#90a3008fcbbc5bcbc704cd6cccf61ef130c5f9eb" dependencies = [ - "anchor-syn", + "anchor-syn 0.29.0", "quote", "syn 1.0.109", ] @@ -273,7 +382,7 @@ name = "anchor-derive-serde" version = "0.29.0" source = "git+https://github.com/dhruvja/anchor#90a3008fcbbc5bcbc704cd6cccf61ef130c5f9eb" dependencies = [ - "anchor-syn", + "anchor-syn 0.29.0", "borsh-derive-internal 0.10.3", "proc-macro2", "quote", @@ -290,18 +399,41 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "anchor-lang" +version = "0.24.2" +source = "git+https://github.com/ComposableFi/mantis-solana?rev=d92c52575aaf430ca1f06bc2589f26b0fedde7bf#d92c52575aaf430ca1f06bc2589f26b0fedde7bf" +dependencies = [ + "anchor-attribute-access-control 0.24.2", + "anchor-attribute-account 0.24.2", + "anchor-attribute-constant 0.24.2", + "anchor-attribute-error 0.24.2", + "anchor-attribute-event 0.24.2", + "anchor-attribute-interface", + "anchor-attribute-program 0.24.2", + "anchor-attribute-state", + "anchor-derive-accounts 0.24.2", + "arrayref", + "base64 0.13.1", + "bincode", + "borsh 0.10.3", + "bytemuck", + "solana-program 2.0.0", + "thiserror", +] + [[package]] name = "anchor-lang" version = "0.29.0" source = "git+https://github.com/dhruvja/anchor#90a3008fcbbc5bcbc704cd6cccf61ef130c5f9eb" dependencies = [ - "anchor-attribute-access-control", - "anchor-attribute-account", - "anchor-attribute-constant", - "anchor-attribute-error", - "anchor-attribute-event", - "anchor-attribute-program", - "anchor-derive-accounts", + "anchor-attribute-access-control 0.29.0", + "anchor-attribute-account 0.29.0", + "anchor-attribute-constant 0.29.0", + "anchor-attribute-error 0.29.0", + "anchor-attribute-event 0.29.0", + "anchor-attribute-program 0.29.0", + "anchor-derive-accounts 0.29.0", "anchor-derive-serde", "anchor-derive-space", "arrayref", @@ -310,7 +442,7 @@ dependencies = [ "borsh 0.10.3", "bytemuck", 
"getrandom 0.2.15", - "solana-program", + "solana-program 1.17.30", "thiserror", ] @@ -320,14 +452,32 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c4fd6e43b2ca6220d2ef1641539e678bfc31b6cc393cf892b373b5997b6a39a" dependencies = [ - "anchor-lang", + "anchor-lang 0.29.0", "mpl-token-metadata", - "solana-program", + "solana-program 1.17.30", "spl-associated-token-account", "spl-token", "spl-token-2022 0.9.0", ] +[[package]] +name = "anchor-syn" +version = "0.24.2" +source = "git+https://github.com/ComposableFi/mantis-solana?rev=d92c52575aaf430ca1f06bc2589f26b0fedde7bf#d92c52575aaf430ca1f06bc2589f26b0fedde7bf" +dependencies = [ + "anyhow", + "bs58 0.3.1", + "heck 0.3.3", + "proc-macro2", + "proc-macro2-diagnostics", + "quote", + "serde", + "serde_json", + "sha2 0.9.9", + "syn 1.0.109", + "thiserror", +] + [[package]] name = "anchor-syn" version = "0.29.0" @@ -942,6 +1092,12 @@ version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + [[package]] name = "base64ct" version = "1.6.0" @@ -1279,6 +1435,16 @@ dependencies = [ "hashbrown 0.13.2", ] +[[package]] +name = "borsh" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6362ed55def622cddc70a4746a68554d7b687713770de539e59a739b249f8ed" +dependencies = [ + "borsh-derive 1.5.1", + "cfg_aliases 0.2.1", +] + [[package]] name = "borsh-derive" version = "0.9.3" @@ -1305,6 +1471,20 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "borsh-derive" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3ef8005764f53cd4dca619f5bf64cafd4664dada50ece25e4d81de54c80cc0b" +dependencies = [ + "once_cell", + "proc-macro-crate 3.1.0", + "proc-macro2", + "quote", + "syn 2.0.61", + "syn_derive", +] + [[package]] name = "borsh-derive-internal" version = "0.9.3" @@ -1391,6 +1571,12 @@ dependencies = [ "alloc-stdlib", ] +[[package]] +name = "bs58" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "476e9cd489f9e121e02ffa6014a8ef220ecb15c05ed23fc34cca13925dc283fb" + [[package]] name = "bs58" version = "0.4.0" @@ -1567,7 +1753,7 @@ dependencies = [ [[package]] name = "cf-guest" version = "0.0.0" -source = "git+https://github.com/composableFi/emulated-light-client/#0c0f4b1f034639854a50217fd777d0a13a2dc55a" +source = "git+https://github.com/ComposableFi/emulated-light-client/#0c0f4b1f034639854a50217fd777d0a13a2dc55a" dependencies = [ "borsh 0.10.3", "bytemuck", @@ -1680,6 +1866,12 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + [[package]] name = "chacha20" version = "0.9.1" @@ -4842,7 +5034,7 @@ dependencies = [ [[package]] name = "guestchain" version = "0.0.0" -source = "git+https://github.com/composableFi/emulated-light-client/#0c0f4b1f034639854a50217fd777d0a13a2dc55a" +source = 
"git+https://github.com/ComposableFi/emulated-light-client/#0c0f4b1f034639854a50217fd777d0a13a2dc55a" dependencies = [ "borsh 0.10.3", "bytemuck", @@ -5533,7 +5725,7 @@ name = "hyperspace-solana" version = "0.1.0" dependencies = [ "anchor-client", - "anchor-lang", + "anchor-lang 0.29.0", "anchor-spl", "anyhow", "async-trait", @@ -5620,7 +5812,7 @@ dependencies = [ name = "hyperspace-testsuite" version = "0.1.0" dependencies = [ - "anchor-lang", + "anchor-lang 0.29.0", "anyhow", "async-trait", "finality-grandpa", @@ -6522,12 +6714,16 @@ dependencies = [ name = "icsxx-cf-solana" version = "0.0.1" dependencies = [ + "assert_matches", + "bincode", + "bitflags 2.5.0", "borsh 0.10.3", "bytemuck", "cf-guest 0.0.0", "derive_more", "ed25519-consensus", "guestchain", + "hex", "ibc 0.15.0", "ibc-core-client-context", "ibc-core-client-types", @@ -6537,14 +6733,20 @@ dependencies = [ "ibc-primitives 0.50.0 (git+https://github.com/mina86/ibc-rs?rev=6015aea441d4660f7f7ecd89b5e770a993448089)", "ibc-proto 0.18.0", "insta", + "itertools 0.11.0", "lib", "memory", + "num_enum 0.7.3", "prost 0.11.9", "prost 0.12.3", - "prost-build 0.11.9", + "prost-build 0.12.3", + "proto-utils", "rand 0.8.5", + "rand_chacha 0.3.1", "sealable-trie", "serde", + "solana-sdk 2.0.0", + "static_assertions", "stdx", "tendermint-proto 0.34.1", "trie-ids", @@ -6841,7 +7043,7 @@ dependencies = [ "prost 0.12.3", "prost-types 0.12.3", "solana-perf", - "solana-sdk", + "solana-sdk 1.17.30", "tonic 0.10.2", "tonic-build 0.10.2", ] @@ -6857,7 +7059,7 @@ dependencies = [ "prost-types 0.12.3", "solana-client", "solana-metrics", - "solana-sdk", + "solana-sdk 1.17.30", "solana-transaction-status", "thiserror", "tokio", @@ -7239,14 +7441,14 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "lib" version = "0.0.0" -source = "git+https://github.com/composableFi/emulated-light-client/#0c0f4b1f034639854a50217fd777d0a13a2dc55a" +source = "git+https://github.com/ComposableFi/emulated-light-client/#0c0f4b1f034639854a50217fd777d0a13a2dc55a" dependencies = [ "base64 0.21.7", "borsh 0.10.3", "bytemuck", "derive_more", "sha2 0.10.8", - "solana-program", + "solana-program 1.17.30", "stdx", ] @@ -8112,7 +8314,7 @@ dependencies = [ [[package]] name = "memory" version = "0.0.0" -source = "git+https://github.com/composableFi/emulated-light-client/#0c0f4b1f034639854a50217fd777d0a13a2dc55a" +source = "git+https://github.com/ComposableFi/emulated-light-client/#0c0f4b1f034639854a50217fd777d0a13a2dc55a" dependencies = [ "derive_more", "stdx", @@ -8296,7 +8498,7 @@ dependencies = [ "borsh 0.10.3", "num-derive 0.3.3", "num-traits", - "solana-program", + "solana-program 1.17.30", "thiserror", ] @@ -8758,11 +8960,11 @@ dependencies = [ [[package]] name = "num_enum" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02339744ee7253741199f897151b38e72257d13802d4ee837285cc2990a90845" +checksum = "4e613fc340b2220f734a8595782c551f1250e969d87d3be1ae0579e8d4065179" dependencies = [ - "num_enum_derive 0.7.2", + "num_enum_derive 0.7.3", ] [[package]] @@ -8779,9 +8981,9 @@ dependencies = [ [[package]] name = "num_enum_derive" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b" +checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", @@ -9403,6 +9605,7 @@ 
dependencies = [ "ics10-grandpa", "ics11-beefy", "ics23 0.10.0", + "icsxx-cf-solana", "light-client-common", "log", "orml-tokens", @@ -12010,6 +12213,19 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "proc-macro2-diagnostics" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bf29726d67464d49fa6224a1d07936a8c08bb3fba727c7493f6cf1616fdaada" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", + "version_check", + "yansi", +] + [[package]] name = "prometheus" version = "0.13.4" @@ -12201,7 +12417,7 @@ dependencies = [ [[package]] name = "proto-utils" version = "0.0.0" -source = "git+https://github.com/composableFi/emulated-light-client/#0c0f4b1f034639854a50217fd777d0a13a2dc55a" +source = "git+https://github.com/ComposableFi/emulated-light-client/#0c0f4b1f034639854a50217fd777d0a13a2dc55a" dependencies = [ "const_format", "derive_more", @@ -14614,7 +14830,7 @@ dependencies = [ [[package]] name = "sealable-trie" version = "0.0.0" -source = "git+https://github.com/composableFi/emulated-light-client/#0c0f4b1f034639854a50217fd777d0a13a2dc55a" +source = "git+https://github.com/ComposableFi/emulated-light-client/#0c0f4b1f034639854a50217fd777d0a13a2dc55a" dependencies = [ "ascii 1.1.0", "base64 0.21.7", @@ -15176,7 +15392,7 @@ dependencies = [ "serde_derive", "serde_json", "solana-config-program", - "solana-sdk", + "solana-sdk 1.17.30", "spl-token", "spl-token-2022 1.0.0", "spl-token-group-interface", @@ -15188,10 +15404,10 @@ dependencies = [ [[package]] name = "solana-allocator" version = "0.0.0" -source = "git+https://github.com/composableFi/emulated-light-client/#0c0f4b1f034639854a50217fd777d0a13a2dc55a" +source = "git+https://github.com/ComposableFi/emulated-light-client/#0c0f4b1f034639854a50217fd777d0a13a2dc55a" dependencies = [ "bytemuck", - "solana-program", + "solana-program 1.17.30", ] [[package]] @@ -15204,7 +15420,7 @@ dependencies = [ "clap 2.34.0", "rpassword", "solana-remote-wallet", - "solana-sdk", + "solana-sdk 1.17.30", "thiserror", "tiny-bip39 0.8.2", "uriparse", @@ -15235,7 +15451,7 @@ dependencies = [ "solana-rpc-client", "solana-rpc-client-api", "solana-rpc-client-nonce-utils", - "solana-sdk", + "solana-sdk 1.17.30", "solana-streamer", "solana-thin-client", "solana-tpu-client", @@ -15255,7 +15471,7 @@ dependencies = [ "serde", "serde_derive", "solana-program-runtime", - "solana-sdk", + "solana-sdk 1.17.30", ] [[package]] @@ -15275,7 +15491,7 @@ dependencies = [ "rcgen", "solana-measure", "solana-metrics", - "solana-sdk", + "solana-sdk 1.17.30", "thiserror", "tokio", ] @@ -15305,7 +15521,30 @@ dependencies = [ "serde_derive", "serde_json", "sha2 0.10.8", - "solana-frozen-abi-macro", + "solana-frozen-abi-macro 1.17.30", + "subtle", + "thiserror", +] + +[[package]] +name = "solana-frozen-abi" +version = "2.0.0" +source = "git+https://github.com/ComposableFi/mantis-solana?rev=d92c52575aaf430ca1f06bc2589f26b0fedde7bf#d92c52575aaf430ca1f06bc2589f26b0fedde7bf" +dependencies = [ + "block-buffer 0.10.4", + "bs58 0.4.0", + "bv", + "either", + "generic-array 0.14.7", + "im", + "log", + "memmap2", + "rustc_version", + "serde", + "serde_bytes", + "serde_derive", + "sha2 0.10.8", + "solana-frozen-abi-macro 2.0.0", "subtle", "thiserror", ] @@ -15322,12 +15561,23 @@ dependencies = [ "syn 2.0.61", ] +[[package]] +name = "solana-frozen-abi-macro" +version = "2.0.0" +source = "git+https://github.com/ComposableFi/mantis-solana?rev=d92c52575aaf430ca1f06bc2589f26b0fedde7bf#d92c52575aaf430ca1f06bc2589f26b0fedde7bf" +dependencies = 
[ + "proc-macro2", + "quote", + "rustc_version", + "syn 2.0.61", +] + [[package]] name = "solana-ibc" version = "0.1.0" -source = "git+https://github.com/composableFi/emulated-light-client/#0c0f4b1f034639854a50217fd777d0a13a2dc55a" +source = "git+https://github.com/ComposableFi/emulated-light-client/#0c0f4b1f034639854a50217fd777d0a13a2dc55a" dependencies = [ - "anchor-lang", + "anchor-lang 0.29.0", "anchor-spl", "base64 0.21.7", "bytemuck", @@ -15371,6 +15621,16 @@ dependencies = [ "log", ] +[[package]] +name = "solana-logger" +version = "2.0.0" +source = "git+https://github.com/ComposableFi/mantis-solana?rev=d92c52575aaf430ca1f06bc2589f26b0fedde7bf#d92c52575aaf430ca1f06bc2589f26b0fedde7bf" +dependencies = [ + "env_logger 0.9.3", + "lazy_static", + "log", +] + [[package]] name = "solana-measure" version = "1.17.30" @@ -15378,7 +15638,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "857178177c6b378bcfc35df6867a6eef211059f5e4ab01ee87355d6b7493b556" dependencies = [ "log", - "solana-sdk", + "solana-sdk 1.17.30", ] [[package]] @@ -15392,7 +15652,7 @@ dependencies = [ "lazy_static", "log", "reqwest", - "solana-sdk", + "solana-sdk 1.17.30", "thiserror", ] @@ -15411,8 +15671,8 @@ dependencies = [ "serde", "serde_derive", "socket2 0.5.7", - "solana-logger", - "solana-sdk", + "solana-logger 1.17.30", + "solana-sdk 1.17.30", "solana-version", "tokio", "url", @@ -15439,11 +15699,11 @@ dependencies = [ "rayon", "rustc_version", "serde", - "solana-frozen-abi", - "solana-frozen-abi-macro", + "solana-frozen-abi 1.17.30", + "solana-frozen-abi-macro 1.17.30", "solana-metrics", "solana-rayon-threadlimit", - "solana-sdk", + "solana-sdk 1.17.30", "solana-vote-program", ] @@ -15492,9 +15752,63 @@ dependencies = [ "serde_json", "sha2 0.10.8", "sha3 0.10.8", - "solana-frozen-abi", - "solana-frozen-abi-macro", - "solana-sdk-macro", + "solana-frozen-abi 1.17.30", + "solana-frozen-abi-macro 1.17.30", + "solana-sdk-macro 1.17.30", + "thiserror", + "tiny-bip39 0.8.2", + "wasm-bindgen", + "zeroize", +] + +[[package]] +name = "solana-program" +version = "2.0.0" +source = "git+https://github.com/ComposableFi/mantis-solana?rev=d92c52575aaf430ca1f06bc2589f26b0fedde7bf#d92c52575aaf430ca1f06bc2589f26b0fedde7bf" +dependencies = [ + "ark-bn254", + "ark-ec", + "ark-ff", + "ark-serialize", + "base64 0.22.1", + "bincode", + "bitflags 2.5.0", + "blake3", + "borsh 0.10.3", + "borsh 0.9.3", + "borsh 1.5.1", + "bs58 0.4.0", + "bv", + "bytemuck", + "cc", + "console_error_panic_hook", + "console_log", + "curve25519-dalek 3.2.1", + "getrandom 0.2.15", + "itertools 0.10.5", + "js-sys", + "lazy_static", + "libc", + "libsecp256k1 0.6.0", + "light-poseidon", + "log", + "memoffset 0.9.1", + "num-bigint 0.4.5", + "num-derive 0.4.2", + "num-traits", + "parking_lot 0.12.2", + "rand 0.8.5", + "rustc_version", + "rustversion", + "serde", + "serde_bytes", + "serde_derive", + "serde_json", + "sha2 0.10.8", + "sha3 0.10.8", + "solana-frozen-abi 2.0.0", + "solana-frozen-abi-macro 2.0.0", + "solana-sdk-macro 2.0.0", "thiserror", "tiny-bip39 0.8.2", "wasm-bindgen", @@ -15520,11 +15834,11 @@ dependencies = [ "rand 0.8.5", "rustc_version", "serde", - "solana-frozen-abi", - "solana-frozen-abi-macro", + "solana-frozen-abi 1.17.30", + "solana-frozen-abi-macro 1.17.30", "solana-measure", "solana-metrics", - "solana-sdk", + "solana-sdk 1.17.30", "solana_rbpf", "thiserror", ] @@ -15545,7 +15859,7 @@ dependencies = [ "serde_json", "solana-account-decoder", "solana-rpc-client-api", - "solana-sdk", + "solana-sdk 1.17.30", 
"thiserror", "tokio", "tokio-stream", @@ -15575,7 +15889,7 @@ dependencies = [ "solana-metrics", "solana-net-utils", "solana-rpc-client-api", - "solana-sdk", + "solana-sdk 1.17.30", "solana-streamer", "thiserror", "tokio", @@ -15605,7 +15919,7 @@ dependencies = [ "parking_lot 0.12.2", "qstring", "semver 1.0.23", - "solana-sdk", + "solana-sdk 1.17.30", "thiserror", "uriparse", ] @@ -15629,7 +15943,7 @@ dependencies = [ "serde_json", "solana-account-decoder", "solana-rpc-client-api", - "solana-sdk", + "solana-sdk 1.17.30", "solana-transaction-status", "solana-version", "solana-vote-program", @@ -15651,7 +15965,7 @@ dependencies = [ "serde_derive", "serde_json", "solana-account-decoder", - "solana-sdk", + "solana-sdk 1.17.30", "solana-transaction-status", "solana-version", "spl-token-2022 1.0.0", @@ -15667,7 +15981,7 @@ dependencies = [ "clap 2.34.0", "solana-clap-utils", "solana-rpc-client", - "solana-sdk", + "solana-sdk 1.17.30", "thiserror", ] @@ -15715,11 +16029,66 @@ dependencies = [ "serde_with", "sha2 0.10.8", "sha3 0.10.8", - "solana-frozen-abi", - "solana-frozen-abi-macro", - "solana-logger", - "solana-program", - "solana-sdk-macro", + "solana-frozen-abi 1.17.30", + "solana-frozen-abi-macro 1.17.30", + "solana-logger 1.17.30", + "solana-program 1.17.30", + "solana-sdk-macro 1.17.30", + "thiserror", + "uriparse", + "wasm-bindgen", +] + +[[package]] +name = "solana-sdk" +version = "2.0.0" +source = "git+https://github.com/ComposableFi/mantis-solana?rev=d92c52575aaf430ca1f06bc2589f26b0fedde7bf#d92c52575aaf430ca1f06bc2589f26b0fedde7bf" +dependencies = [ + "anchor-lang 0.24.2", + "assert_matches", + "base64 0.22.1", + "bincode", + "bitflags 2.5.0", + "borsh 1.5.1", + "bs58 0.4.0", + "bytemuck", + "byteorder", + "chrono", + "derivation-path", + "digest 0.10.7", + "ed25519-dalek 1.0.1", + "ed25519-dalek-bip32", + "generic-array 0.14.7", + "hmac 0.12.1", + "itertools 0.10.5", + "js-sys", + "lazy_static", + "libsecp256k1 0.6.0", + "log", + "memmap2", + "num-derive 0.4.2", + "num-traits", + "num_enum 0.7.3", + "pbkdf2 0.11.0", + "qstring", + "qualifier_attr", + "rand 0.7.3", + "rand 0.8.5", + "rustc_version", + "rustversion", + "serde", + "serde_bytes", + "serde_derive", + "serde_json", + "serde_with", + "sha2 0.10.8", + "sha3 0.10.8", + "siphasher", + "solana-frozen-abi 2.0.0", + "solana-frozen-abi-macro 2.0.0", + "solana-logger 2.0.0", + "solana-program 2.0.0", + "solana-sdk-macro 2.0.0", "thiserror", "uriparse", "wasm-bindgen", @@ -15738,6 +16107,18 @@ dependencies = [ "syn 2.0.61", ] +[[package]] +name = "solana-sdk-macro" +version = "2.0.0" +source = "git+https://github.com/ComposableFi/mantis-solana?rev=d92c52575aaf430ca1f06bc2589f26b0fedde7bf#d92c52575aaf430ca1f06bc2589f26b0fedde7bf" +dependencies = [ + "bs58 0.4.0", + "proc-macro2", + "quote", + "rustversion", + "syn 2.0.61", +] + [[package]] name = "solana-security-txt" version = "1.1.1" @@ -15747,7 +16128,7 @@ checksum = "468aa43b7edb1f9b7b7b686d5c3aeb6630dc1708e86e31343499dd5c4d775183" [[package]] name = "solana-signature-verifier" version = "0.0.0" -source = "git+https://github.com/composableFi/emulated-light-client/#0c0f4b1f034639854a50217fd777d0a13a2dc55a" +source = "git+https://github.com/ComposableFi/emulated-light-client/#0c0f4b1f034639854a50217fd777d0a13a2dc55a" dependencies = [ "base64 0.21.7", "borsh 0.10.3", @@ -15756,7 +16137,7 @@ dependencies = [ "derive_more", "guestchain", "lib", - "solana-program", + "solana-program 1.17.30", "stdx", ] @@ -15786,7 +16167,7 @@ dependencies = [ "rustls 0.21.12", "solana-metrics", 
"solana-perf", - "solana-sdk", + "solana-sdk 1.17.30", "thiserror", "tokio", "x509-parser", @@ -15804,7 +16185,7 @@ dependencies = [ "solana-connection-cache", "solana-rpc-client", "solana-rpc-client-api", - "solana-sdk", + "solana-sdk 1.17.30", ] [[package]] @@ -15826,7 +16207,7 @@ dependencies = [ "solana-pubsub-client", "solana-rpc-client", "solana-rpc-client-api", - "solana-sdk", + "solana-sdk 1.17.30", "thiserror", "tokio", ] @@ -15848,7 +16229,7 @@ dependencies = [ "serde_derive", "serde_json", "solana-account-decoder", - "solana-sdk", + "solana-sdk 1.17.30", "spl-associated-token-account", "spl-memo", "spl-token", @@ -15859,13 +16240,13 @@ dependencies = [ [[package]] name = "solana-trie" version = "0.0.0" -source = "git+https://github.com/composableFi/emulated-light-client/#0c0f4b1f034639854a50217fd777d0a13a2dc55a" +source = "git+https://github.com/ComposableFi/emulated-light-client/#0c0f4b1f034639854a50217fd777d0a13a2dc55a" dependencies = [ "bytemuck", "lib", "memory", "sealable-trie", - "solana-program", + "solana-program 1.17.30", "stdx", ] @@ -15878,7 +16259,7 @@ dependencies = [ "async-trait", "solana-connection-cache", "solana-net-utils", - "solana-sdk", + "solana-sdk 1.17.30", "solana-streamer", "thiserror", "tokio", @@ -15895,9 +16276,9 @@ dependencies = [ "semver 1.0.23", "serde", "serde_derive", - "solana-frozen-abi", - "solana-frozen-abi-macro", - "solana-sdk", + "solana-frozen-abi 1.17.30", + "solana-frozen-abi-macro 1.17.30", + "solana-sdk 1.17.30", ] [[package]] @@ -15913,21 +16294,21 @@ dependencies = [ "rustc_version", "serde", "serde_derive", - "solana-frozen-abi", - "solana-frozen-abi-macro", + "solana-frozen-abi 1.17.30", + "solana-frozen-abi-macro 1.17.30", "solana-metrics", - "solana-program", + "solana-program 1.17.30", "solana-program-runtime", - "solana-sdk", + "solana-sdk 1.17.30", "thiserror", ] [[package]] name = "solana-write-account" version = "0.0.0" -source = "git+https://github.com/composableFi/emulated-light-client/#0c0f4b1f034639854a50217fd777d0a13a2dc55a" +source = "git+https://github.com/ComposableFi/emulated-light-client/#0c0f4b1f034639854a50217fd777d0a13a2dc55a" dependencies = [ - "solana-program", + "solana-program 1.17.30", "stdx", ] @@ -15953,8 +16334,8 @@ dependencies = [ "serde", "serde_json", "sha3 0.9.1", - "solana-program", - "solana-sdk", + "solana-program 1.17.30", + "solana-sdk 1.17.30", "subtle", "thiserror", "zeroize", @@ -17121,7 +17502,7 @@ dependencies = [ "borsh 0.10.3", "num-derive 0.4.2", "num-traits", - "solana-program", + "solana-program 1.17.30", "spl-token", "spl-token-2022 1.0.0", "thiserror", @@ -17134,7 +17515,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "daa600f2fe56f32e923261719bae640d873edadbc5237681a39b8e37bfd4d263" dependencies = [ "bytemuck", - "solana-program", + "solana-program 1.17.30", "spl-discriminator-derive", ] @@ -17168,7 +17549,7 @@ version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f180b03318c3dbab3ef4e1e4d46d5211ae3c780940dd0a28695aba4b59a75a" dependencies = [ - "solana-program", + "solana-program 1.17.30", ] [[package]] @@ -17179,7 +17560,7 @@ checksum = "85a5db7e4efb1107b0b8e52a13f035437cdcb36ef99c58f6d467f089d9b2915a" dependencies = [ "borsh 0.10.3", "bytemuck", - "solana-program", + "solana-program 1.17.30", "solana-zk-token-sdk", "spl-program-error", ] @@ -17192,7 +17573,7 @@ checksum = "7e0657b6490196971d9e729520ba934911ff41fbb2cb9004463dbe23cf8b4b4f" dependencies = [ "num-derive 0.4.2", "num-traits", - 
"solana-program", + "solana-program 1.17.30", "spl-program-error-derive", "thiserror", ] @@ -17216,7 +17597,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "062e148d3eab7b165582757453632ffeef490c02c86a48bfdb4988f63eefb3b9" dependencies = [ "bytemuck", - "solana-program", + "solana-program 1.17.30", "spl-discriminator", "spl-pod", "spl-program-error", @@ -17230,7 +17611,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56f335787add7fa711819f9e7c573f8145a5358a709446fe2d24bf2a88117c90" dependencies = [ "bytemuck", - "solana-program", + "solana-program 1.17.30", "spl-discriminator", "spl-pod", "spl-program-error", @@ -17248,7 +17629,7 @@ dependencies = [ "num-derive 0.3.3", "num-traits", "num_enum 0.6.1", - "solana-program", + "solana-program 1.17.30", "thiserror", ] @@ -17262,8 +17643,8 @@ dependencies = [ "bytemuck", "num-derive 0.4.2", "num-traits", - "num_enum 0.7.2", - "solana-program", + "num_enum 0.7.3", + "solana-program 1.17.30", "solana-zk-token-sdk", "spl-memo", "spl-pod", @@ -17284,8 +17665,8 @@ dependencies = [ "bytemuck", "num-derive 0.4.2", "num-traits", - "num_enum 0.7.2", - "solana-program", + "num_enum 0.7.3", + "solana-program 1.17.30", "solana-security-txt", "solana-zk-token-sdk", "spl-memo", @@ -17305,7 +17686,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b889509d49fa74a4a033ca5dae6c2307e9e918122d97e58562f5c4ffa795c75d" dependencies = [ "bytemuck", - "solana-program", + "solana-program 1.17.30", "spl-discriminator", "spl-pod", "spl-program-error", @@ -17318,7 +17699,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c16ce3ba6979645fb7627aa1e435576172dd63088dc7848cb09aa331fa1fe4f" dependencies = [ "borsh 0.10.3", - "solana-program", + "solana-program 1.17.30", "spl-discriminator", "spl-pod", "spl-program-error", @@ -17333,7 +17714,7 @@ checksum = "051d31803f873cabe71aec3c1b849f35248beae5d19a347d93a5c9cccc5d5a9b" dependencies = [ "arrayref", "bytemuck", - "solana-program", + "solana-program 1.17.30", "spl-discriminator", "spl-pod", "spl-program-error", @@ -17349,7 +17730,7 @@ checksum = "7aabdb7c471566f6ddcee724beb8618449ea24b399e58d464d6b5bc7db550259" dependencies = [ "arrayref", "bytemuck", - "solana-program", + "solana-program 1.17.30", "spl-discriminator", "spl-pod", "spl-program-error", @@ -17364,7 +17745,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f9ebd75d29c5f48de5f6a9c114e08531030b75b8ac2c557600ac7da0b73b1e8" dependencies = [ "bytemuck", - "solana-program", + "solana-program 1.17.30", "spl-discriminator", "spl-pod", "spl-program-error", @@ -17404,7 +17785,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a2a1c578e98c1c16fc3b8ec1328f7659a500737d7a0c6d625e73e830ff9c1f6" dependencies = [ "bitflags 1.3.2", - "cfg_aliases", + "cfg_aliases 0.1.1", "libc", "parking_lot 0.11.2", "parking_lot_core 0.8.6", @@ -17418,7 +17799,7 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70a2595fc3aa78f2d0e45dd425b22282dd863273761cc77780914b2cf3003acf" dependencies = [ - "cfg_aliases", + "cfg_aliases 0.1.1", "memchr", "proc-macro2", "quote", @@ -17428,7 +17809,7 @@ dependencies = [ [[package]] name = "stdx" version = "0.0.0" -source = "git+https://github.com/composableFi/emulated-light-client/#0c0f4b1f034639854a50217fd777d0a13a2dc55a" +source = 
"git+https://github.com/ComposableFi/emulated-light-client/#0c0f4b1f034639854a50217fd777d0a13a2dc55a" [[package]] name = "strsim" @@ -17746,6 +18127,18 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "syn_derive" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1329189c02ff984e9736652b1631330da25eaa6bc639089ed4915d25446cbe7b" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.61", +] + [[package]] name = "sync_wrapper" version = "0.1.2" @@ -18713,7 +19106,7 @@ dependencies = [ [[package]] name = "trie-ids" version = "0.0.0" -source = "git+https://github.com/composableFi/emulated-light-client/#0c0f4b1f034639854a50217fd777d0a13a2dc55a" +source = "git+https://github.com/ComposableFi/emulated-light-client/#0c0f4b1f034639854a50217fd777d0a13a2dc55a" dependencies = [ "ascii 1.1.0", "base64 0.21.7", @@ -19146,7 +19539,7 @@ dependencies = [ [[package]] name = "wasm" version = "0.0.0" -source = "git+https://github.com/composableFi/emulated-light-client/#0c0f4b1f034639854a50217fd777d0a13a2dc55a" +source = "git+https://github.com/ComposableFi/emulated-light-client/#0c0f4b1f034639854a50217fd777d0a13a2dc55a" dependencies = [ "const_format", "derive_more", @@ -20162,6 +20555,12 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "yansi" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" + [[package]] name = "yap" version = "0.10.0" diff --git a/Cargo.toml b/Cargo.toml index c6c3bcd55..322a952c9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -72,6 +72,8 @@ ibc-core-host = { git = "https://github.com/mina86/ibc-rs", rev = "2b50319f3460d ibc-core-host-types = { git = "https://github.com/mina86/ibc-rs", rev = "2b50319f3460d03e8781e5a834c4b56bdcfdc5bc", default-features = false } ibc-primitives = { git = "https://github.com/mina86/ibc-rs", rev = "2b50319f3460d03e8781e5a834c4b56bdcfdc5bc", default-features = false } ibc-testkit = { git = "https://github.com/mina86/ibc-rs", rev = "2b50319f3460d03e8781e5a834c4b56bdcfdc5bc", default-features = false } +solana-sdk = { git = "https://github.com/ComposableFi/mantis-solana", rev = "d92c52575aaf430ca1f06bc2589f26b0fedde7bf" } + # ibc-testkit = { git = "https://github.com/mina86/ibc-rs", rev = "6015aea441d4660f7f7ecd89b5e770a993448089" } #ibc = { path = "../ibc-rs-mina/ibc" } #ibc-app-nft-transfer = { path = "../ibc-rs-mina/ibc-apps/ics721-nft-transfer" } diff --git a/contracts/pallet-ibc/Cargo.toml b/contracts/pallet-ibc/Cargo.toml index bbd301ae2..cb897e930 100644 --- a/contracts/pallet-ibc/Cargo.toml +++ b/contracts/pallet-ibc/Cargo.toml @@ -4,8 +4,8 @@ homepage = "https://composable.finance" name = "pallet-ibc" version = "0.0.1" authors = [ - "Seun Lanlege ", - "David Salami ", + "Seun Lanlege ", + "David Salami ", ] [package.metadata.docs.rs] @@ -13,11 +13,11 @@ targets = ["x86_64-unknown-linux-gnu"] [package.metadata.cargo-udeps.ignore] normal = [ - "frame-benchmarking", - "pallet-ibc-ping", - "balances", - "pallet-assets", - "simple-iavl", + "frame-benchmarking", + "pallet-ibc-ping", + "balances", + "pallet-assets", + "simple-iavl", ] [dependencies] @@ -25,13 +25,13 @@ normal = [ log = { version = "0.4.0", default-features = false } serde = { version = "1.0.136", default-features = false, features = ["derive"] } scale-info = { version = "2.1.1", default-features = false, features = [ - "derive", + "derive", ] } serde_json = { version = "1.0.45", 
default-features = false } sha2 = { version = "0.10.2", default-features = false } derive_more = { version = "0.99.17", default-features = false, features = [ - "from", - "display", + "from", + "display", ] } borsh = { version = "0.10.3", default-features = false } # substrate @@ -52,7 +52,8 @@ ics23 = { git = "https://github.com/cosmos/ics23", rev = "74ce807b7be39a7e0afb4e #guest-chain cf-guest = { path = "../../light-clients/cf-guest" } -guestchain = { git = "https://github.com/composableFi/emulated-light-client/" , default-features = false } +icsxx-cf-solana = { path = "../../light-clients/icsxx-cf-solana" } +guestchain = { git = "https://github.com/composableFi/emulated-light-client/", default-features = false } ed25519-dalek = { version = "2.1.1", default-features = false, features = ["pkcs8"] } grandpa-client-primitives = { package = "grandpa-light-client-primitives", path = "../../algorithms/grandpa/primitives", default-features = false } @@ -81,7 +82,7 @@ sp-consensus-aura = { git = "https://github.com/paritytech/substrate", branch = simple-iavl = { path = "simple-iavl", default-features = false } sp-consensus-grandpa = { git = "https://github.com/paritytech/substrate", branch = "polkadot-v0.9.43", default-features = false, optional = true } finality-grandpa = { version = "0.16.2", features = [ - "derive-codec", + "derive-codec", ], default-features = false, optional = true } [dependencies.ibc] @@ -132,79 +133,80 @@ default = ["std"] # testing = ["ibc/mocks"] testing = [] std = [ - "codec/std", - "log/std", - "scale-info/std", - "serde/std", - "serde_json/std", - "sha2/std", - "frame-benchmarking/std", - "frame-support/std", - "frame-system/std", - "sp-runtime/std", - "sp-core/std", - "sp-std/std", - "sp-io/std", - "sp-trie/std", - "balances/std", - "cumulus-primitives-core/std", - "pallet-assets/std", - "ibc/std", - "ibc-proto/std", - "ibc-primitives/std", - "parachain-info/std", - "grandpa-client-primitives/std", - "beefy-client-primitives/std", - "light-client-common/std", - "ics10-grandpa/std", - "ics11-beefy/std", - "ics07-tendermint/std", - "sp-consensus-grandpa/std", - "sp-consensus-grandpa/std", - "finality-grandpa/std", - "hex/std", - "pallet-timestamp/std", - "ics08-wasm/std", - "pallet-ibc-ping?/std", - "simple-iavl/std", - "chrono/std", - "ics23/std", - "orml-tokens/std", - "orml-traits/std", - "pallet-aura?/std", - "pallet-membership?/std", - "prost/std", - "sp-consensus-aura?/std", - "sp-consensus-slots?/std", - "sp-keystore/std", - "tendermint/std", + "codec/std", + "log/std", + "scale-info/std", + "serde/std", + "serde_json/std", + "sha2/std", + "frame-benchmarking/std", + "frame-support/std", + "frame-system/std", + "sp-runtime/std", + "sp-core/std", + "sp-std/std", + "sp-io/std", + "sp-trie/std", + "balances/std", + "cumulus-primitives-core/std", + "pallet-assets/std", + "ibc/std", + "ibc-proto/std", + "ibc-primitives/std", + "parachain-info/std", + "grandpa-client-primitives/std", + "beefy-client-primitives/std", + "light-client-common/std", + "ics10-grandpa/std", + "ics11-beefy/std", + "ics07-tendermint/std", + "sp-consensus-grandpa/std", + "sp-consensus-grandpa/std", + "finality-grandpa/std", + "hex/std", + "pallet-timestamp/std", + "ics08-wasm/std", + "pallet-ibc-ping?/std", + "simple-iavl/std", + "chrono/std", + "ics23/std", + "orml-tokens/std", + "orml-traits/std", + "pallet-aura?/std", + "pallet-membership?/std", + "prost/std", + "sp-consensus-aura?/std", + "sp-consensus-slots?/std", + "sp-keystore/std", + "tendermint/std", ] runtime-benchmarks = [ - 
"frame-benchmarking", - "pallet-ibc-ping", - "frame-system/runtime-benchmarks", - "frame-support/runtime-benchmarks", - "hex-literal", - "pallet-assets/runtime-benchmarks", - "pallet-timestamp/runtime-benchmarks", - "pallet-aura", - "pallet-membership", - "pallet-assets", - "balances", - "sp-consensus-slots", - "sp-consensus-aura/serde", - "sp-consensus-aura/std", - "ibc-primitives/runtime-benchmarks", - "sp-consensus-grandpa", - "finality-grandpa", - "pallet-ibc-ping/runtime-benchmarks", - "frame-benchmarking?/runtime-benchmarks", - "orml-tokens/runtime-benchmarks", - "pallet-assets?/runtime-benchmarks", - "balances?/runtime-benchmarks", - "pallet-membership?/runtime-benchmarks", - "sp-runtime/runtime-benchmarks" + "frame-benchmarking", + "pallet-ibc-ping", + "frame-system/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "hex-literal", + "pallet-assets/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "pallet-aura", + "pallet-membership", + "pallet-assets", + "balances", + "sp-consensus-slots", + "sp-consensus-aura/serde", + "sp-consensus-aura/std", + "ibc-primitives/runtime-benchmarks", + "sp-consensus-grandpa", + "finality-grandpa", + "pallet-ibc-ping/runtime-benchmarks", + "frame-benchmarking?/runtime-benchmarks", + "orml-tokens/runtime-benchmarks", + "pallet-assets?/runtime-benchmarks", + "balances?/runtime-benchmarks", + "pallet-membership?/runtime-benchmarks", + "sp-runtime/runtime-benchmarks" ] try-runtime = ["frame-support/try-runtime"] +experimental-cf-solana = [] diff --git a/contracts/pallet-ibc/src/light_clients.rs b/contracts/pallet-ibc/src/light_clients.rs index 011e9a355..6d9e77d43 100644 --- a/contracts/pallet-ibc/src/light_clients.rs +++ b/contracts/pallet-ibc/src/light_clients.rs @@ -43,6 +43,10 @@ use ics11_beefy::{ client_message::BEEFY_CLIENT_MESSAGE_TYPE_URL, client_state::BEEFY_CLIENT_STATE_TYPE_URL, consensus_state::BEEFY_CONSENSUS_STATE_TYPE_URL, }; +use icsxx_cf_solana::{ + CF_SOLANA_CLIENT_MESSAGE_TYPE_URL, CF_SOLANA_CLIENT_STATE_TYPE_URL, + CF_SOLANA_CONSENSUS_STATE_TYPE_URL, CF_SOLANA_HEADER_TYPE_URL, CF_SOLANA_MISBEHAVIOUR_TYPE_URL, +}; use prost::Message; use sp_core::{crypto::ByteArray, ed25519, H256}; use sp_runtime::{ @@ -333,6 +337,8 @@ pub enum AnyClient { Tendermint(ics07_tendermint::client_def::TendermintClient), Wasm(ics08_wasm::client_def::WasmClient), Guest(cf_guest::client_def::GuestClient), + #[cfg(feature = "experimental-cf-solana")] + CfSolana(icsxx_cf_solana::client_def::CfSolanaClient), #[cfg(any(test, feature = "testing"))] Mock(ibc::mock::client_def::MockClient), } @@ -344,6 +350,8 @@ pub enum AnyUpgradeOptions { Tendermint(ics07_tendermint::client_state::UpgradeOptions), Wasm(Box), Guest(cf_guest::client::UpgradeOptions), + #[cfg(feature = "experimental-cf-solana")] + CfSolana(icsxx_cf_solana::client::UpgradeOptions), #[cfg(any(test, feature = "testing"))] Mock(()), } @@ -360,6 +368,9 @@ pub enum AnyClientState { Wasm(ics08_wasm::client_state::ClientState), #[ibc(proto_url = "GUEST_CLIENT_STATE_TYPE_URL")] Guest(cf_guest::ClientState), + #[cfg(feature = "experimental-cf-solana")] + #[ibc(proto_url = "CF_SOLANA_CLIENT_STATE_TYPE_URL")] + CfSolana(icsxx_cf_solana::ClientState), #[cfg(any(test, feature = "testing"))] #[ibc(proto_url = "MOCK_CLIENT_STATE_TYPE_URL")] Mock(ibc::mock::client_state::MockClientState), @@ -435,6 +446,9 @@ pub enum AnyConsensusState { Wasm(ics08_wasm::consensus_state::ConsensusState), #[ibc(proto_url = "GUEST_CONSENSUS_STATE_TYPE_URL")] Guest(cf_guest::ConsensusState), + #[cfg(feature = 
"experimental-cf-solana")] + #[ibc(proto_url = "CF_SOLANA_CONSENSUS_STATE_TYPE_URL")] + CfSolana(icsxx_cf_solana::ConsensusState), #[cfg(any(test, feature = "testing"))] #[ibc(proto_url = "MOCK_CONSENSUS_STATE_TYPE_URL")] Mock(ibc::mock::client_state::MockConsensusState), @@ -462,6 +476,9 @@ pub enum AnyClientMessage { Wasm(ics08_wasm::client_message::ClientMessage), #[ibc(proto_url = "GUEST_CLIENT_MESSAGE_TYPE_URL")] Guest(cf_guest::ClientMessage), + #[cfg(feature = "experimental-cf-solana")] + #[ibc(proto_url = "CF_SOLANA_CLIENT_MESSAGE_TYPE_URL")] + CfSolana(icsxx_cf_solana::ClientMessage), #[cfg(any(test, feature = "testing"))] #[ibc(proto_url = "MOCK_CLIENT_MESSAGE_TYPE_URL")] Mock(ibc::mock::header::MockClientMessage), @@ -565,6 +582,11 @@ impl From for Any { }, AnyClientMessage::Guest(msg) => Any { type_url: GUEST_CLIENT_MESSAGE_TYPE_URL.to_string(), value: msg.encode_vec() }, + #[cfg(feature = "experimental-cf-solana")] + AnyClientMessage::CfSolana(msg) => Any { + type_url: CF_SOLANA_CLIENT_MESSAGE_TYPE_URL.to_string(), + value: msg.encode_to_vec().expect("encode_vec failed"), + }, #[cfg(any(test, feature = "testing"))] AnyClientMessage::Mock(_msg) => panic!("MockHeader can't be serialized"), } diff --git a/light-clients/ics07-tendermint/src/client_state.rs b/light-clients/ics07-tendermint/src/client_state.rs index c1d80ccc7..95756a0ef 100644 --- a/light-clients/ics07-tendermint/src/client_state.rs +++ b/light-clients/ics07-tendermint/src/client_state.rs @@ -44,6 +44,7 @@ use ibc::{ }; pub const TENDERMINT_CLIENT_STATE_TYPE_URL: &str = "/ibc.lightclients.tendermint.v1.ClientState"; + #[derive(PartialEq, Eq, Debug, Clone, Serialize, Deserialize)] pub struct ClientState { pub chain_id: ChainId, diff --git a/light-clients/icsxx-cf-solana/Cargo.toml b/light-clients/icsxx-cf-solana/Cargo.toml index 030d74129..2c9dacf8f 100644 --- a/light-clients/icsxx-cf-solana/Cargo.toml +++ b/light-clients/icsxx-cf-solana/Cargo.toml @@ -8,10 +8,16 @@ edition = "2021" borsh = "0.10" bytemuck = { version = "1.14", default-features = false, features = ["must_cast"] } derive_more = { version = "0.99", features = ["from"], default-features = false } -prost = { version = "0.11", features = ["prost-derive"], default-features = false } -prost-12 = { package = "prost", version = "0.12", default-features = false } +prost = { version = "0.12", features = ["prost-derive"], default-features = false } +prost-11 = { package = "prost", version = "0.11", default-features = false } ed25519-consensus = { version = "2", default-features = false } serde = { version = "1.0", default-features = false, features = ["derive"] } +proto-utils = { git = "https://github.com/composableFi/emulated-light-client", default-features = false } +num_enum = { version = "0.7.3", default-features = false } +static_assertions = "1.1.0" +assert_matches = "1.5.0" +rand_chacha = { version = "0.3.1", default-features = false } +rand = { version = "0.8.5", default-features = false } # New IBC ibc-core-client-context = { git = "https://github.com/mina86/ibc-rs", rev = "6015aea441d4660f7f7ecd89b5e770a993448089", default-features = false } @@ -33,13 +39,18 @@ trie-ids = { git = "https://github.com/ComposableFi/emulated-light-client/", def sealable-trie = { git = "https://github.com/ComposableFi/emulated-light-client/", features = ["borsh"], default-features = false } stdx = { git = "https://github.com/ComposableFi/emulated-light-client/", default-features = false } cf-guest-upstream = { package = "cf-guest", git = 
"https://github.com/ComposableFi/emulated-light-client/", default-features = false } +bitflags = "2.4.2" +solana-sdk = { git = "https://github.com/ComposableFi/mantis-solana", rev = "d92c52575aaf430ca1f06bc2589f26b0fedde7bf" } +itertools = "0.11.0" +bincode = "1.3.3" [build-dependencies] -prost-build = { version = "0.11", default-features = false } +prost-build = { version = "0.12", default-features = false } [dev-dependencies] insta = { version = "1.34.0" } rand = { version = "0.8.5" } +hex = "0.4.3" guestchain = { git = "https://github.com/ComposableFi/emulated-light-client/", default-features = false, features = ["test_utils"] } lib = { git = "https://github.com/ComposableFi/emulated-light-client/", default-features = false, features = ["test_utils"] } diff --git a/light-clients/icsxx-cf-solana/build.rs b/light-clients/icsxx-cf-solana/build.rs new file mode 100644 index 000000000..c9e3f5f87 --- /dev/null +++ b/light-clients/icsxx-cf-solana/build.rs @@ -0,0 +1,6 @@ +fn main() -> std::io::Result<()> { + prost_build::Config::new() + .enable_type_names() + .include_file("messages.rs") + .compile_protos(&["src/proto/cf-solana.proto"], &["src/proto/"]) +} diff --git a/light-clients/icsxx-cf-solana/src/client.rs b/light-clients/icsxx-cf-solana/src/client.rs index 6b3a6e35e..f4275a958 100644 --- a/light-clients/icsxx-cf-solana/src/client.rs +++ b/light-clients/icsxx-cf-solana/src/client.rs @@ -1,42 +1,71 @@ +use crate::{client_def::CfSolanaClient, error::Error, proto, Header, CLIENT_TYPE}; use alloc::string::{String, ToString}; - use ibc::{ core::{ics02_client::height::Height, ics24_host::identifier::ClientId}, timestamp::Timestamp, }; -use lib::hash::CryptoHash; +use proto_utils::BadMessage; use serde::{Deserialize, Serialize}; +use solana_sdk::{clock::Slot, pubkey::Pubkey}; +use std::time::Duration; + +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct ClientState { + /// Highest available Solana slot number. + pub latest_height: Slot, + + pub trusting_period_ns: u64, + + /// Whether client is frozen. + pub is_frozen: bool, -use crate::{client_def::GuestClient, error::Error, CLIENT_TYPE}; + /// Current validator's Public Key + pub current_leader: Pubkey, + + /// Genesis timestamp + pub genesis_time_ns: u64, + + /// Chain's slot duration. 
+	pub slot_duration_ns: u64,
+}
 
-super::wrap!(cf_guest_upstream::ClientState<PK> as ClientState);
-super::wrap!(impl proto for ClientState);
+// super::wrap!(cf_guest_upstream::ClientState<PK> as ClientState);
+// super::wrap!(impl proto for ClientState);
 
-impl<PK: guestchain::PubKey> ClientState<PK> {
+impl ClientState {
+	pub(crate) fn timestamp_for_slot_ns(&self, slot: Slot) -> u64 {
+		// TODO: calculate timestamp using the Clock account:
+		// https://github.com/jito-foundation/jito-solana/blob/5396abaad1df66ebcab1e93473ce1b7c9f4c9f6c/sdk/program/src/clock.rs#L195
+
+		self.genesis_time_ns + (slot * self.slot_duration_ns)
+	}
+}
+
+impl ClientState {
 	pub fn new(
-		genesis_hash: CryptoHash,
-		latest_height: guestchain::BlockHeight,
+		latest_height: Slot,
 		trusting_period_ns: u64,
-		epoch_commitment: CryptoHash,
-		prev_epoch_commitment: Option<CryptoHash>,
 		is_frozen: bool,
+		current_validator: Pubkey,
+		genesis_time_ns: u64,
+		slot_duration_ns: u64,
 	) -> Self {
-		Self(cf_guest_upstream::ClientState::new(
-			genesis_hash,
+		Self {
 			latest_height,
 			trusting_period_ns,
-			epoch_commitment,
-			prev_epoch_commitment,
 			is_frozen,
-		))
+			current_leader: current_validator,
+			genesis_time_ns,
+			slot_duration_ns,
+		}
 	}
 
-	pub fn with_header(&self, header: &cf_guest_upstream::Header<PK>) -> Self {
-		Self(self.0.with_header(&header))
+	pub fn update_unchecked(self, header: Header) -> Self {
+		Self { latest_height: header.slot(), ..self }
 	}
 
-	pub fn frozen(&self) -> Self {
-		Self(self.0.frozen())
+	pub fn into_frozen(self) -> Self {
+		Self { is_frozen: true, ..self }
 	}
 
 	/// Verify the time and height delays
@@ -52,50 +81,52 @@ impl<PK: guestchain::PubKey> ClientState<PK> {
 		// NOTE: delay time period is inclusive, so if current_time is earliest_time, then we
 		// return no error https://github.com/cosmos/ibc-go/blob/9ebc2f81049869bc40c443ffb72d9f3e47afb4fc/modules/light-clients/07-tendermint/client_state.go#L306
 		if current_time.nanoseconds() < earliest_time {
-			return Err(Error::NotEnoughTimeElapsed { current_time, earliest_time })
+			return Err(Error::NotEnoughTimeElapsed { current_time, earliest_time });
 		}
 
 		let earliest_height = processed_height + delay_period_blocks;
 		if current_height.revision_height < earliest_height {
-			return Err(Error::NotEnoughBlocksElapsed { current_height, earliest_height })
+			return Err(Error::NotEnoughBlocksElapsed { current_height, earliest_height });
 		}
 
 		Ok(())
 	}
 
 	pub fn verify_height(&self, client_id: &ClientId, height: ibc::Height) -> Result<(), Error> {
-		if self.0.latest_height < height.revision_height.into() {
+		if self.latest_height < height.revision_height {
 			return Err(Error::InsufficientHeight {
-				latest_height: Height::new(1, self.0.latest_height.into()),
+				latest_height: Height::new(1, self.latest_height.into()),
 				target_height: height,
-			})
+			});
 		}
 
-		if self.0.is_frozen {
-			return Err(Error::ClientFrozen { client_id: client_id.clone() })
+		if self.is_frozen {
+			return Err(Error::ClientFrozen { client_id: client_id.clone() });
 		}
 
 		Ok(())
 	}
+
+	pub(crate) fn leader_for_slot(&self, _slot: Slot) -> Pubkey {
+		// TODO: implement the actual mapping from slot to leader (see
+		// `crate::solana::leader_schedule`)
+		self.current_leader.clone()
+	}
 }
 
 #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
 pub struct UpgradeOptions {}
 
-impl<PK> ibc::core::ics02_client::client_state::ClientState for ClientState<PK>
-where
-	PK: guestchain::PubKey + Send + Sync,
-	PK::Signature: Send + Sync,
-{
+impl ibc::core::ics02_client::client_state::ClientState for ClientState {
 	type UpgradeOptions = UpgradeOptions;
 
-	type ClientDef = GuestClient<PK>;
+	type ClientDef = CfSolanaClient;
 
 	fn chain_id(&self) -> ibc::core::ics24_host::identifier::ChainId {
 		ibc::core::ics24_host::identifier::ChainId::new(String::from("Solana"), 0)
 	}
 
 	fn client_def(&self) -> Self::ClientDef {
-		GuestClient::default()
+		CfSolanaClient::default()
 	}
 
 	fn client_type(&self) -> ibc::core::ics02_client::client_state::ClientType {
@@ -103,11 +134,11 @@
 	}
 
 	fn latest_height(&self) -> ibc::Height {
-		Height::new(1, u64::from(self.0.latest_height))
+		Height::new(1, u64::from(self.latest_height))
 	}
 
 	fn frozen_height(&self) -> Option<ibc::Height> {
-		self.0.is_frozen.then(|| Height::new(1, u64::from(self.0.latest_height)))
+		self.is_frozen.then(|| Height::new(1, u64::from(self.latest_height)))
 	}
 
 	fn upgrade(
@@ -116,69 +147,64 @@
 		_upgrade_options: Self::UpgradeOptions,
 		_chain_id: ibc::core::ics24_host::identifier::ChainId,
 	) -> Self {
-		self.0.latest_height = upgrade_height.revision_height.into();
+		self.latest_height = upgrade_height.revision_height.into();
 		self
 	}
 
 	fn expired(&self, elapsed: core::time::Duration) -> bool {
-		elapsed.as_nanos() as u64 > self.0.trusting_period_ns
+		elapsed.as_nanos() as u64 > self.trusting_period_ns
 	}
 
 	fn encode_to_vec(&self) -> Result<Vec<u8>, ibc::protobuf::Error> {
-		Ok(self.0.encode())
+		Ok(self.encode())
 	}
 }
 
-#[cfg(test)]
-mod tests {
-	use super::*;
-
-	use guestchain::validators::MockPubKey;
-
-	const ANY_MESSAGE: [u8; 116] = [
-		10, 34, 47, 108, 105, 103, 104, 116, 99, 108, 105, 101, 110, 116, 115, 46, 103, 117, 101,
-		115, 116, 46, 118, 49, 46, 67, 108, 105, 101, 110, 116, 83, 116, 97, 116, 101, 18, 78, 10,
-		32, 243, 148, 241, 41, 122, 49, 51, 253, 97, 145, 113, 22, 234, 164, 193, 183, 185, 48,
-		160, 186, 69, 72, 144, 156, 126, 229, 103, 131, 220, 174, 140, 165, 16, 5, 24, 128, 128,
-		144, 202, 210, 198, 14, 34, 32, 86, 12, 131, 131, 127, 125, 82, 54, 32, 207, 121, 149, 204,
-		11, 121, 102, 180, 211, 111, 54, 0, 207, 247, 125, 195, 57, 10, 10, 80, 84, 86, 152,
-	];
-
-	fn message() -> &'static [u8] {
-		&ANY_MESSAGE[38..]
+impl From<ClientState> for proto::ClientState {
+	fn from(state: ClientState) -> Self {
+		Self::from(&state)
 	}
+}
 
-	const GENESIS_HASH: CryptoHash = CryptoHash([
-		243, 148, 241, 41, 122, 49, 51, 253, 97, 145, 113, 22, 234, 164, 193, 183, 185, 48, 160,
-		186, 69, 72, 144, 156, 126, 229, 103, 131, 220, 174, 140, 165,
-	]);
-	const EPOCH_COMMITMENT: CryptoHash = CryptoHash([
-		86, 12, 131, 131, 127, 125, 82, 54, 32, 207, 121, 149, 204, 11, 121, 102, 180, 211, 111,
-		54, 0, 207, 247, 125, 195, 57, 10, 10, 80, 84, 86, 152,
-	]);
-
-	fn check(state: ClientState<MockPubKey>) {
-		let want = ClientState::<MockPubKey>::new(
-			GENESIS_HASH.clone(),
-			5.into(),
-			64000000000000,
-			EPOCH_COMMITMENT.clone(),
-			Some(EPOCH_COMMITMENT.clone()),
-			false,
-		);
-		assert_eq!(want, state);
+impl From<&ClientState> for proto::ClientState {
+	fn from(state: &ClientState) -> Self {
+		Self {
+			latest_height: state.latest_height.into(),
+			trusting_period_ns: state.trusting_period_ns,
+			is_frozen: state.is_frozen,
+			current_leader: state.current_leader.to_bytes().to_vec(),
+			genesis_time: state.genesis_time_ns,
+			slot_duration: state.slot_duration_ns,
+		}
 	}
+}
 
-	#[test]
-	fn test_decode_vec() {
-		check(ibc::protobuf::Protobuf::decode_vec(message()).unwrap());
+impl TryFrom<proto::ClientState> for ClientState {
+	type Error = BadMessage;
+	fn try_from(msg: proto::ClientState) -> Result<Self, Self::Error> {
+		Self::try_from(&msg)
 	}
+}
 
-	#[test]
-	fn test_from_any() {
-		use ibc_proto::google::protobuf::Any;
+impl TryFrom<&proto::ClientState> for ClientState {
+	type Error = BadMessage;
 
-		let any: Any = prost::Message::decode(ANY_MESSAGE.as_ref()).unwrap();
-		check(any.try_into().unwrap());
+	fn try_from(msg: &proto::ClientState) -> Result<Self, Self::Error> {
+		let current_leader_bytes: &[u8] = msg.current_leader.as_ref();
+		let current_leader = Pubkey::try_from(current_leader_bytes).map_err(|_| BadMessage)?;
+
+		Ok(Self {
+			latest_height: msg.latest_height.into(),
+			trusting_period_ns: msg.trusting_period_ns,
+			is_frozen: msg.is_frozen,
+			current_leader,
+			genesis_time_ns: msg.genesis_time,
+			slot_duration_ns: msg.slot_duration,
+		})
 	}
 }
+
+proto_utils::define_wrapper! {
+	proto: crate::proto::ClientState,
+	wrapper: ClientState,
+}
diff --git a/light-clients/icsxx-cf-solana/src/client_def.rs b/light-clients/icsxx-cf-solana/src/client_def.rs
index b4030bc2b..38e9ded8d 100644
--- a/light-clients/icsxx-cf-solana/src/client_def.rs
+++ b/light-clients/icsxx-cf-solana/src/client_def.rs
@@ -1,10 +1,15 @@
-use core::str::FromStr;
-
-use guestchain::Signature;
-
-use crate::alloc::string::ToString;
+use crate::{
+	alloc::string::ToString,
+	error::Error,
+	solana::{
+		entry::Entry,
+		shred::{merkle::SIZE_OF_MERKLE_PROOF_ENTRY, shred_code::ShredCode, Shred, ShredData},
+		shredder::Shredder,
+	},
+	ClientMessage, ClientState, ConsensusState as ClientConsensusState, Header,
+};
 use alloc::vec::Vec;
-use guestchain::{PubKey, Verifier};
+use core::str::FromStr;
 use ibc::{
 	core::{
 		ics02_client::{
@@ -13,32 +18,27 @@ use ibc::{
 			client_state::ClientState as OtherClientState,
 			error::Error as Ics02ClientError,
 		},
+		ics24_host::identifier::ClientId,
 		ics26_routing::context::ReaderContext,
 	},
 	protobuf::Protobuf,
 };
 use prost::Message;
 
-use crate::{error::Error, ClientMessage, ClientState, ConsensusState as ClientConsensusState};
-
 type Result<T = (), E = Ics02ClientError> = ::core::result::Result<T, E>;
 
 #[derive(Clone, Debug, PartialEq, Eq)]
-pub struct GuestClient<PK>(core::marker::PhantomData<PK>);
+pub struct CfSolanaClient;
 
-impl<PK> Default for GuestClient<PK> {
+impl Default for CfSolanaClient {
 	fn default() -> Self {
-		Self(core::marker::PhantomData)
+		Self
 	}
 }
 
-impl<PK> ClientDef for GuestClient<PK>
-where
-	PK: PubKey + Send + Sync,
-	PK::Signature: Send + Sync,
-{
-	type ClientMessage = ClientMessage<PK>;
-	type ClientState = ClientState<PK>;
+impl ClientDef for CfSolanaClient {
+	type ClientMessage = ClientMessage;
+	type ClientState = ClientState;
 	type ConsensusState = ClientConsensusState;
 
 	fn verify_client_message(
@@ -48,7 +48,36 @@
 		client_state: Self::ClientState,
 		client_msg: Self::ClientMessage,
 	) -> Result<(), Ics02ClientError> {
-		client_state.0.do_verify_client_message(self, client_msg.0).map_err(convert)
+		match client_msg {
+			ClientMessage::Header(header) => {
+				// The client can't be updated if no shreds were received
+				let shreds = header.shreds;
+				let slot = shreds.slot();
+
+				// TODO: verify that the header is within trusting period
+
+				let leader = client_state.leader_for_slot(slot);
+
+				// Verify all shreds
+				shreds
+					.iter()
+					.try_for_each(|shred| {
+						shred.sanitize()?;
+						let _root = shred.verify_with_root(&leader)?;
+						Ok(())
+					})
+					.map_err(|err: Error| {
+						Ics02ClientError::implementation_specific(alloc::format!(
+							"shred verification failed: {err}",
+						))
+					})?;
+				Ok(())
+			},
+			ClientMessage::Misbehaviour(_) =>
+				return Err(Ics02ClientError::implementation_specific(
+					"misbehaviour not supported".to_string(),
+				)),
+		}
 	}
 
 	fn update_state(
@@ -57,19 +86,17 @@
 		_client_id: ibc::core::ics24_host::identifier::ClientId,
 		client_state: Self::ClientState,
 		client_msg: Self::ClientMessage,
-	) -> Result<
-		(Self::ClientState, ibc::core::ics02_client::client_def::ConsensusUpdateResult<Ctx>),
-		Ics02ClientError,
-	> {
-		let header = match client_msg.0 {
-			cf_guest_upstream::ClientMessage::Header(header) => header,
+	) -> Result<(Self::ClientState, ConsensusUpdateResult<Ctx>), Ics02ClientError> {
+		let header = match client_msg {
+			ClientMessage::Header(header) => header,
 			_ => unreachable!("02-client will check for Header before calling update_state; qed"),
 		};
 
-		let header_consensus_state = ClientConsensusState::from(&header);
+		let header_consensus_state =
+			ClientConsensusState::from_header_and_client_state(&header, &client_state)?;
 
 	fn update_state(
@@ -57,19 +86,17 @@ where
 		_client_id: ibc::core::ics24_host::identifier::ClientId,
 		client_state: Self::ClientState,
 		client_msg: Self::ClientMessage,
-	) -> Result<
-		(Self::ClientState, ibc::core::ics02_client::client_def::ConsensusUpdateResult<Ctx>),
-		Ics02ClientError,
-	> {
-		let header = match client_msg.0 {
-			cf_guest_upstream::ClientMessage::Header(header) => header,
+	) -> Result<(Self::ClientState, ConsensusUpdateResult<Ctx>), Ics02ClientError> {
+		let header = match client_msg {
+			ClientMessage::Header(header) => header,
 			_ => unreachable!("02-client will check for Header before calling update_state; qed"),
 		};
-		let header_consensus_state = ClientConsensusState::from(&header);
+		let header_consensus_state =
+			ClientConsensusState::from_header_and_client_state(&header, &client_state)?;
 		let cs = Ctx::AnyConsensusState::wrap(&header_consensus_state).ok_or_else(|| {
 			Error::UnknownConsensusStateType { description: "Ctx::AnyConsensusState".to_string() }
 		})?;
 
-		Ok((client_state.with_header(&header), ConsensusUpdateResult::Single(cs)))
+		Ok((client_state.update_unchecked(header), ConsensusUpdateResult::Single(cs)))
 	}
 
 	fn update_state_on_misbehaviour(
@@ -77,7 +104,7 @@ where
 		client_state: Self::ClientState,
 		_client_msg: Self::ClientMessage,
 	) -> Result<Self::ClientState, Ics02ClientError> {
-		Ok(client_state.frozen())
+		Ok(client_state.into_frozen())
 	}
 
 	fn check_for_misbehaviour(
@@ -87,12 +114,15 @@ where
 		client_state: Self::ClientState,
 		client_msg: Self::ClientMessage,
 	) -> Result<bool, Ics02ClientError> {
-		let client_id = convert(client_id);
-		let ctx = CommonContext::new(ctx);
-		client_state
-			.0
-			.do_check_for_misbehaviour(ctx, &client_id, client_msg.0)
-			.map_err(convert)
+		match client_msg {
+			ClientMessage::Header(header) =>
+				Self::check_header_for_misbehaviour(ctx, &client_id, &client_state, &header)
+					.map_err(Into::into),
+			ClientMessage::Misbehaviour(_) =>
+				return Err(Ics02ClientError::implementation_specific(
+					"misbehaviour not supported".to_string(),
+				)),
+		}
 	}
 
 	fn verify_upgrade_and_update_state(
@@ -222,7 +252,7 @@ where
 		commitment: ibc::core::ics04_channel::commitment::PacketCommitment,
 	) -> Result<(), Ics02ClientError> {
 		client_state.verify_height(client_id, height)?;
-		verify_delay_passed::<Ctx, PK>(ctx, height, connection_end)?;
+		verify_delay_passed::<Ctx>(ctx, height, connection_end)?;
 
 		let path = ibc_core_host_types::path::CommitmentPath {
 			port_id: convert(port_id),
@@ -248,7 +278,7 @@ where
 	) -> Result<(), Ics02ClientError> {
 		// client state height = consensus state height
 		client_state.verify_height(client_id, height)?;
-		verify_delay_passed::<Ctx, PK>(ctx, height, connection_end)?;
+		verify_delay_passed::<Ctx>(ctx, height, connection_end)?;
 
 		let path = ibc_core_host_types::path::AckPath {
 			port_id: convert(port_id),
@@ -272,7 +302,7 @@ where
 		sequence: ibc::core::ics04_channel::packet::Sequence,
 	) -> Result<(), Ics02ClientError> {
 		client_state.verify_height(client_id, height)?;
-		verify_delay_passed::<Ctx, PK>(ctx, height, connection_end)?;
+		verify_delay_passed::<Ctx>(ctx, height, connection_end)?;
 
 		let path = ibc_core_host_types::path::SeqRecvPath(convert(port_id), convert(channel_id));
 		let mut seq_bytes = Vec::new();
@@ -294,7 +324,7 @@ where
 		sequence: ibc::core::ics04_channel::packet::Sequence,
 	) -> Result<(), Ics02ClientError> {
 		client_state.verify_height(client_id, height)?;
-		verify_delay_passed::<Ctx, PK>(ctx, height, connection_end)?;
+		verify_delay_passed::<Ctx>(ctx, height, connection_end)?;
 
 		let path = ibc_core_host_types::path::ReceiptPath {
 			port_id: convert(port_id),
@@ -305,7 +335,39 @@ where
 	}
 }
 
-fn verify_delay_passed<Ctx: ReaderContext, PK: PubKey>(
+impl CfSolanaClient {
+	fn check_header_for_misbehaviour<Ctx: ReaderContext>(
+		ctx: &Ctx,
+		client_id: &ClientId,
+		client_state: &ClientState,
+		header: &Header,
+	) -> Result<bool, Ics02ClientError> {
+		let height = header.height();
+
+		// If we received an update from the past...
+		if height <= client_state.latest_height() {
+			// ...and we have the consensus state for that height, we need to check if
+			// they're the same, otherwise we have a misbehaviour.
+			if let Ok(existing_consensus_state) = ctx.consensus_state(&client_id, height) {
+				let header_consensus_state =
+					ClientConsensusState::from_header_and_client_state(&header, &client_state)?;
+				let new_consensus_state = Ctx::AnyConsensusState::wrap(&header_consensus_state)
+					.ok_or_else(|| Error::UnknownConsensusStateType {
+						description: "Ctx::AnyConsensusState".to_string(),
+					})?;
+
+				// The consensus state is different, so we have a misbehaviour.
+				if existing_consensus_state != new_consensus_state {
+					return Ok(true);
+				}
+			}
+		}
+
+		Ok(false)
+	}
+}
+
+fn verify_delay_passed<Ctx: ReaderContext>(
 	ctx: &Ctx,
 	height: ibc::Height,
 	connection_end: &ibc::core::ics03_connection::connection::ConnectionEnd,
@@ -325,7 +387,7 @@ fn verify_delay_passed(
 	let delay_period_height = ctx.block_delay(delay_period_time);
 	let delay_period_time_u64 = u64::try_from(delay_period_time.as_nanos()).unwrap();
 
-	ClientState::<PK>::verify_delay_passed(
+	ClientState::verify_delay_passed(
 		current_timestamp,
 		current_height,
 		processed_time.nanoseconds(),
@@ -336,120 +398,6 @@ fn verify_delay_passed(
 	.map_err(|e| e.into())
 }
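For intuition about the two delay axes checked above, a small worked example; the 400 ms expected block time is an assumption for illustration, and `block_delay` here only mirrors the shape of `ctx.block_delay`:

```rust
use core::time::Duration;

fn main() {
    // Hypothetical connection delay and host block time.
    let delay_period_time = Duration::from_secs(10);
    let max_expected_time_per_block = Duration::from_millis(400);

    // Time-based delay expressed in blocks, as `ctx.block_delay` would do.
    let delay_period_height =
        (delay_period_time.as_millis() / max_expected_time_per_block.as_millis()) as u64;
    assert_eq!(delay_period_height, 25);

    // `verify_delay_passed` then requires both the 10 s and the 25-block
    // delay to have elapsed since the consensus state was processed.
    let delay_period_time_u64 = u64::try_from(delay_period_time.as_nanos()).unwrap();
    assert_eq!(delay_period_time_u64, 10_000_000_000);
}
```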
 
-impl<PK: PubKey> Verifier<PK> for GuestClient<PK> {
-	fn verify(&self, message: &[u8], pubkey: &PK, signature: &PK::Signature) -> bool {
-		(|| {
-			let pubkey = pubkey.as_bytes();
-			let pubkey = ed25519_consensus::VerificationKey::try_from(&pubkey[..]).ok()?;
-			let signature = signature.as_bytes();
-			let sig = ed25519_consensus::Signature::try_from(&signature[..]).ok()?;
-			pubkey.verify(&sig, message).ok()?;
-			Some(())
-		})()
-		.is_some()
-	}
-}
-
-#[derive(bytemuck::TransparentWrapper)]
-#[repr(transparent)]
-#[transparent(Ctx)]
-struct CommonContext<PK, Ctx> {
-	ctx: Ctx,
-	_ph: core::marker::PhantomData<PK>,
-}
-
-impl<PK, Ctx> CommonContext<PK, Ctx> {
-	fn new(ctx: &Ctx) -> &Self {
-		bytemuck::TransparentWrapper::wrap_ref(ctx)
-	}
-}
-
-type NewResult<T = (), E = ibc_core_client_types::error::ClientError> = Result<T, E>;
-
-impl<PK: PubKey, Ctx: ReaderContext> cf_guest_upstream::CommonContext<PK>
-	for CommonContext<PK, Ctx>
-{
-	type ConversionError = core::convert::Infallible;
-	type AnyClientState = ClientState<PK>;
-	type AnyConsensusState = ClientConsensusState;
-
-	fn host_metadata(
-		&self,
-	) -> NewResult<(ibc_primitives::Timestamp, ibc_core_client_types::Height)> {
-		unimplemented!("host_metadata")
-	}
-
-	fn set_client_state(
-		&mut self,
-		_client_id: &ibc_core_host_types::identifiers::ClientId,
-		_state: ClientState<PK>,
-	) -> NewResult<()> {
-		unimplemented!("set_client_state")
-	}
-
-	fn consensus_state(
-		&self,
-		_client_id: &ibc_core_host_types::identifiers::ClientId,
-		_height: ibc_core_client_types::Height,
-	) -> NewResult<Self::AnyConsensusState> {
-		unimplemented!("consensus_state")
-	}
-
-	fn consensus_state_neighbourhood(
-		&self,
-		client_id: &ibc_core_host_types::identifiers::ClientId,
-		height: ibc_core_client_types::Height,
-	) -> NewResult<cf_guest_upstream::Neighbourhood<Self::AnyConsensusState>> {
-		use cf_guest_upstream::Neighbourhood;
-
-		let res: Result<_, Ics02ClientError> = (|| {
-			let client_id = convert(client_id);
-			let height = convert(height);
-			Ok(if let Some(state) = self.ctx.maybe_consensus_state(&client_id, height)? {
-				Neighbourhood::This(state)
-			} else {
-				let prev = self.ctx.prev_consensus_state(&client_id, height)?;
-				let next = self.ctx.next_consensus_state(&client_id, height)?;
-				Neighbourhood::Neighbours(prev, next)
-			})
-		})();
-		match res {
-			Ok(res) => Ok(res.map(|state: Ctx::AnyConsensusState| {
-				// TODO(mina86): propagate error rather than unwrapping
-				let state: Self::AnyConsensusState = state.downcast().unwrap();
-				state
-			})),
-			Err(err) => Err(convert(err)),
-		}
-	}
-
-	fn store_consensus_state_and_metadata(
-		&mut self,
-		_client_id: &ibc_core_host_types::identifiers::ClientId,
-		_height: ibc_core_client_types::Height,
-		_consensus: Self::AnyConsensusState,
-		_host_timestamp: ibc_primitives::Timestamp,
-		_host_height: ibc_core_client_types::Height,
-	) -> NewResult {
-		unimplemented!("store_consensus_state_and_metadata")
-	}
-
-	fn delete_consensus_state_and_metadata(
-		&mut self,
-		_client_id: &ibc_core_host_types::identifiers::ClientId,
-		_height: ibc_core_client_types::Height,
-	) -> NewResult {
-		unimplemented!("delete_consensus_state_and_metadata")
-	}
-
-	fn earliest_consensus_state(
-		&self,
-		_client_id: &ibc_core_host_types::identifiers::ClientId,
-	) -> NewResult<Option<(ibc_core_client_types::Height, Self::AnyConsensusState)>> {
-		unimplemented!("earliest_consensus_state")
-	}
-}
-
 // Helper wrappers
 
 fn verify(
@@ -559,3 +507,60 @@ impl, TE: ConvertFrom> ConvertFrom(&bytes)
+	}).collect::<Result<Vec<_>, _>>().unwrap();
+	for s in &shreds {
+		match &s {
+			Shred::ShredCode(s) => match &s {
+				ShredCode::Legacy(_) => {
+					panic!()
+				},
+				ShredCode::Merkle(s) => {
+					std::println!("payload: {}", hex::encode(s.payload()));
+
+					let proof_size = s.proof_size().unwrap();
+					let proof_offset = s.proof_offset().unwrap();
+
+					let proof_size = usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY;
+					let merkle_proof =
+						s.payload.get(proof_offset..proof_offset + proof_size).unwrap();
+					std::println!("proof: {}", hex::encode(merkle_proof));
+					std::println!("node: {}", hex::encode(s.merkle_node().unwrap()));
+				},
+			},
+			Shred::ShredData(s) => match s {
+				ShredData::Legacy(_) => {
+					panic!()
+				},
+				ShredData::Merkle(s) => {
+					std::println!("payload: {}", hex::encode(s.payload()));
+
+					let proof_size = s.proof_size().unwrap();
+					let proof_offset = s.proof_offset().unwrap();
+
+					let proof_size = usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY;
+					let merkle_proof =
+						s.payload.get(proof_offset..proof_offset + proof_size).unwrap();
+					std::println!("proof: {}", hex::encode(merkle_proof));
+
+					std::println!("node: {}", hex::encode(s.merkle_node().unwrap()));
+				},
+			},
+		}
+	}
+	let shreds = shreds.iter().collect::<Vec<_>>();
+	let payload = Shredder::deshred(shreds.as_slice()).unwrap();
+	let entries = bincode::deserialize::<Vec<Entry>>(&payload).unwrap();
+	for entry in &entries {
+		std::println!("{}", hex::encode(&entry.hash.to_bytes()));
+	}
+}
diff --git a/light-clients/icsxx-cf-solana/src/client_impls.rs b/light-clients/icsxx-cf-solana/src/client_impls.rs
index a1fe79326..8161fe5e0 100644
--- a/light-clients/icsxx-cf-solana/src/client_impls.rs
+++ b/light-clients/icsxx-cf-solana/src/client_impls.rs
@@ -60,70 +60,6 @@ pub trait CommonContext {
 		-> Result>;
 }
 
-// impl ibc::ClientStateCommon for ClientState {
-// 	fn verify_consensus_state(&self, consensus_state: Any) -> Result {
-// 		ConsensusState::try_from(consensus_state)?;
-// 		Ok(())
-// 	}
-
-// 	fn client_type(&self) -> ibc::ClientType {
-// 		ibc::ClientType::new(super::CLIENT_TYPE).unwrap()
-// 	}
-
-// 	fn latest_height(&self) -> ibc::Height {
-// 		ibc::Height::new(0, self.latest_height.into()).unwrap()
-// 	}
-
-// 	fn validate_proof_height(&self, proof_height: ibc::Height) -> Result {
-// 		let latest_height = self.latest_height();
-// 		if proof_height <= latest_height {
-// 			Ok(())
-// 		} else {
-// 			Err(ibc::ClientError::InvalidProofHeight {
-// 				latest_height,
-// 				proof_height,
-// 			})
-// 		}
-// 	}
-
-// 	/// Panics since client upgrades aren’t supported.
-// 	fn verify_upgrade_client(
-// 		&self,
-// 		_upgraded_client_state: Any,
-// 		_upgraded_consensus_state: Any,
-// 		_proof_upgrade_client: ibc::CommitmentProofBytes,
-// 		_proof_upgrade_consensus_state: ibc::CommitmentProofBytes,
-// 		_root: &ibc::CommitmentRoot,
-// 	) -> Result { unimplemented!("IBC cilent upgrades are currently not supported")
-// 	}
-
-// 	/// Verifies membership proof.
-// 	///
-// 	/// See [`proof::verify`] for documentation of the proof format.
-// 	fn verify_membership(
-// 		&self,
-// 		prefix: &ibc::CommitmentPrefix,
-// 		proof: &ibc::CommitmentProofBytes,
-// 		root: &ibc::CommitmentRoot,
-// 		path: ibc::path::Path,
-// 		value: Vec,
-// 	) -> Result { let value = Some(value.as_slice()); proof::verify(prefix, proof, root, path,
-// value).map_err(Into::into)
-// 	}
-
-// 	/// Verifies membership proof.
-// 	///
-// 	/// See [`proof::verify`] for documentation of the proof format.
-// 	fn verify_non_membership(
-// 		&self,
-// 		prefix: &ibc::CommitmentPrefix,
-// 		proof: &ibc::CommitmentProofBytes,
-// 		root: &ibc::CommitmentRoot,
-// 		path: ibc::path::Path,
-// 	) -> Result { proof::verify(prefix, proof, root, path, None).map_err(Into::into)
-// 	}
-// }
-
 impl From<proof::VerifyError> for ibc::ClientError {
 	fn from(err: proof::VerifyError) -> Self {
 		use ::ibc::core::ics23_commitment::error::Error;
@@ -138,139 +74,6 @@ impl From<proof::VerifyError> for ibc::ClientError {
 	}
 }
 
-// impl ibc::ClientStateExecution for ClientState
-// where
-// 	E: ibc::ExecutionContext + ibc::ClientExecutionContext + CommonContext,
-// 	::AnyClientState: From>,
-// 	::AnyConsensusState: From,
-// {
-// 	fn initialise(
-// 		&self,
-// 		ctx: &mut E,
-// 		client_id: &ibc::ClientId,
-// 		consensus_state: Any,
-// 	) -> Result { parse_client_id(client_id)?; let consensus_state =
-// super::ConsensusState::try_from(consensus_state)?;
-
-// 		ctx.store_client_state(
-// 			ibc::path::ClientStatePath::new(client_id.clone()),
-// 			self.clone().into(),
-// 		)?;
-// 		ctx.store_consensus_state(
-// 			ibc::path::ClientConsensusStatePath::new(
-// 				client_id.clone(),
-// 				0,
-// 				u64::from(self.latest_height),
-// 			),
-// 			consensus_state.into(),
-// 		)?;
-
-// 		Ok(())
-// 	}
-
-// 	fn update_state(
-// 		&self,
-// 		ctx: &mut E,
-// 		client_id: &ibc::ClientId,
-// 		header: Any,
-// 	) -> Result> { let header = crate::proto::Header::try_from(header)?; let
-// header = crate::Header::::try_from(header)?; let header_height = ibc::Height::new(0,
-// header.block_header.block_height.into())?;
-
-// 		let (host_timestamp, host_height) = CommonContext::host_metadata(ctx)?;
-// 		self.prune_oldest_consensus_state(ctx, client_id, host_timestamp)?;
-
-// 		let maybe_existing_consensus =
-// 			CommonContext::consensus_state(ctx, client_id, header_height).ok();
-// 		if maybe_existing_consensus.is_none() {
-// 			let new_consensus_state = ConsensusState::from(&header);
-// 			let new_client_state = self.with_header(&header);
-
-// 			ctx.store_client_state(
-// 				ibc::path::ClientStatePath::new(client_id.clone()),
-// 				new_client_state.into(),
-// 			)?;
-// 			ctx.store_consensus_state_and_metadata(
-// 				client_id,
-// 				header_height,
-// 				new_consensus_state.into(),
-// 				host_timestamp,
-// 				host_height,
-// 			)?;
-// 		}
-
-// 		Ok(alloc::vec![header_height])
-// 	}
-
-// 	fn update_state_on_misbehaviour(
-// 		&self,
-// 		ctx: &mut E,
-// 		client_id: &ibc::ClientId,
-// 		_client_message: Any,
-// 	) -> Result { ctx.store_client_state( ibc::path::ClientStatePath::new(client_id.clone()),
-// self.frozen().into(), )?; Ok(())
-// 	}
-
-// 	fn update_state_on_upgrade(
-// 		&self,
-// 		_ctx: &mut E,
-// 		_client_id: &ibc::ClientId,
-// 		_upgraded_client_state: Any,
-// 		_upgraded_consensus_state: Any,
-// 	) -> Result { Err(ibc::UpgradeClientError::Other { reason: "upgrade not
-// supported".into(), } .into())
-// 	}
-// }
-
-// impl ibc::ClientStateValidation for ClientState
-// where
-// 	V: ibc::ValidationContext
-// 		+ ibc::ClientValidationContext
-// 		+ CommonContext
-// 		+ guestchain::Verifier,
-// {
-// 	fn verify_client_message(
-// 		&self,
-// 		ctx: &V,
-// 		client_id: &ibc::ClientId,
-// 		client_message: Any,
-// 	) -> Result { self.verify_client_message(ctx, client_id, client_message)
-// 	}
-
-// 	fn check_for_misbehaviour(
-// 		&self,
-// 		ctx: &V,
-// 		client_id: &ibc::ClientId,
-// 		client_message: Any,
-// 	) -> Result { self.check_for_misbehaviour(ctx, client_id, client_message)
-// 	}
-
-// 	fn status(
-// 		&self,
-// 		ctx: &V,
-// 		client_id: &ibc::ClientId,
-// 	) -> Result { if self.is_frozen { return Ok(ibc::Status::Frozen); }
-
-// 		let height = ibc::Height::new(0, self.latest_height.into())?;
-// 		let consensus = CommonContext::consensus_state(ctx, client_id, height)
-// 			.and_then(|state| state.try_into().map_err(error));
-// 		let consensus = match consensus {
-// 			Ok(consensus) => consensus,
-// 			Err(ibc::ClientError::ConsensusStateNotFound { .. }) => {
-// 				return Ok(ibc::Status::Expired)
-// 			}
-// 			Err(err) => return Err(err),
-// 		};
-
-// 		let (host_timestamp, _height) = CommonContext::host_metadata(ctx)?;
-// 		Ok(if self.consensus_has_expired(&consensus, host_timestamp) {
-// 			ibc::Status::Expired
-// 		} else {
-// 			ibc::Status::Active
-// 		})
-// 	}
-// }
-
 impl ClientState {
 	pub fn verify_client_message(
 		&self,
@@ -328,7 +131,6 @@ impl ClientState {
 			}
 		}
 		Ok(())
-		// Err("Quorum not reached")
 		})()
 		.map_err(error)
 	}
diff --git a/light-clients/icsxx-cf-solana/src/consensus.rs b/light-clients/icsxx-cf-solana/src/consensus.rs
index 8d0c5a60f..2e913056b 100644
--- a/light-clients/icsxx-cf-solana/src/consensus.rs
+++ b/light-clients/icsxx-cf-solana/src/consensus.rs
@@ -1,17 +1,28 @@
+use crate::{error::Error, ClientState, Header};
 use core::{convert::Infallible, num::NonZeroU64};
-
 use lib::hash::CryptoHash;
-use prost::Message as _;
-
-use crate::proto;
+use solana_sdk::hash::Hash;
 
 super::wrap!(cf_guest_upstream::ConsensusState as ConsensusState);
 super::wrap!(impl Eq for ConsensusState);
-super::wrap!(impl proto for ConsensusState);
+// super::wrap!(impl proto for ConsensusState);
 
 impl ConsensusState {
-	pub fn new(block_hash: &CryptoHash, timestamp_ns: NonZeroU64) -> Self {
-		Self(cf_guest_upstream::ConsensusState::new(block_hash, timestamp_ns))
+	pub fn new(block_hash: &Hash, timestamp_ns: NonZeroU64) -> Self {
+		Self(cf_guest_upstream::ConsensusState::new(
+			&CryptoHash(block_hash.to_bytes()),
+			timestamp_ns,
+		))
+	}
+
+	pub fn from_header_and_client_state(
+		header: &Header,
+		client_state: &ClientState,
+	) -> Result<Self, Error> {
+		let hash = header.calculate_hash()?;
+		let timestamp_ns = client_state.timestamp_for_slot_ns(header.slot());
+		let nanos = NonZeroU64::try_from(timestamp_ns).map_err(|_| Error::InvalidTimestamp)?;
+		Ok(Self::new(&hash, nanos))
+	}
 }
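`from_header_and_client_state` leans on `client_state.timestamp_for_slot_ns`, which this diff does not show. Given the `genesis_time` and `slot_duration` fields the client state carries, a linear slot clock is the natural reading; a hedged sketch:

```rust
/// Hypothetical sketch of `timestamp_for_slot_ns`: nanoseconds for the start
/// of `slot`, assuming a fixed slot duration counted from genesis time.
fn timestamp_for_slot_ns(genesis_time_ns: u64, slot_duration_ns: u64, slot: u64) -> u64 {
    // Saturating arithmetic keeps the sketch total for extreme inputs; a
    // zero result is then rejected by the `NonZeroU64` conversion above.
    genesis_time_ns.saturating_add(slot_duration_ns.saturating_mul(slot))
}
```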
@@ -28,70 +39,6 @@ impl ibc::core::ics02_client::client_consensus::ConsensusState for ConsensusStat
 	}
 
 	fn encode_to_vec(&self) -> Result<Vec<u8>, ibc::protobuf::Error> {
-		Ok(proto::ConsensusState::from(self).encode_to_vec())
-	}
-}
-
-impl<PK: guestchain::PubKey> From<crate::Header<PK>> for ConsensusState {
-	fn from(header: crate::Header<PK>) -> Self {
-		Self::from(&header.0)
-	}
-}
-
-impl<PK: guestchain::PubKey> From<&crate::Header<PK>> for ConsensusState {
-	fn from(header: &crate::Header<PK>) -> Self {
-		Self::from(&header.0)
-	}
-}
-
-impl<PK: guestchain::PubKey> From<cf_guest_upstream::Header<PK>> for ConsensusState {
-	fn from(header: cf_guest_upstream::Header<PK>) -> Self {
-		Self::from(&header)
-	}
-}
-
-impl<PK: guestchain::PubKey> From<&cf_guest_upstream::Header<PK>> for ConsensusState {
-	fn from(header: &cf_guest_upstream::Header<PK>) -> Self {
-		Self(header.into())
-	}
-}
-
-#[cfg(test)]
-mod tests {
-	use super::*;
-
-	const ANY_MESSAGE: [u8; 85] = [
-		10, 37, 47, 108, 105, 103, 104, 116, 99, 108, 105, 101, 110, 116, 115, 46, 103, 117, 101,
-		115, 116, 46, 118, 49, 46, 67, 111, 110, 115, 101, 110, 115, 117, 115, 83, 116, 97, 116,
-		101, 18, 44, 10, 32, 74, 147, 61, 207, 26, 96, 73, 253, 54, 118, 91, 237, 36, 210, 58, 218,
-		179, 236, 158, 187, 5, 231, 241, 133, 178, 150, 85, 151, 36, 160, 36, 105, 16, 128, 220,
-		164, 128, 131, 220, 190, 228, 23,
-	];
-
-	fn message() -> &'static [u8] {
-		&ANY_MESSAGE[41..]
-	}
-
-	const BLOCK_HASH: CryptoHash = CryptoHash([
-		74, 147, 61, 207, 26, 96, 73, 253, 54, 118, 91, 237, 36, 210, 58, 218, 179, 236, 158, 187,
-		5, 231, 241, 133, 178, 150, 85, 151, 36, 160, 36, 105,
-	]);
-
-	fn check(state: ConsensusState) {
-		let want = ConsensusState::new(&BLOCK_HASH, NonZeroU64::new(1713895499000000000).unwrap());
-		assert_eq!(want, state);
-	}
-
-	#[test]
-	fn test_decode_vec() {
-		check(ibc::protobuf::Protobuf::decode_vec(message()).unwrap());
-	}
-
-	#[test]
-	fn test_from_any() {
-		use ibc_proto::google::protobuf::Any;
-
-		let any: Any = prost::Message::decode(ANY_MESSAGE.as_ref()).unwrap();
-		check(any.try_into().unwrap());
+		Ok(self.0.encode_to_vec().expect("encoding failed"))
 	}
 }
diff --git a/light-clients/icsxx-cf-solana/src/error.rs b/light-clients/icsxx-cf-solana/src/error.rs
index 7c6cee943..98d78f05c 100644
--- a/light-clients/icsxx-cf-solana/src/error.rs
+++ b/light-clients/icsxx-cf-solana/src/error.rs
@@ -28,6 +28,22 @@ pub enum Error {
 	InsufficientHeight { latest_height: Height, target_height: Height },
 	ClientFrozen { client_id: ClientId },
 	UnknownConsensusStateType { description: String },
+	ShardsAreEmpty,
+	ShredsNotSorted,
+	ShredsContainDuplicates,
+	ShredsFromDifferentSlots,
+	ShredIsNotData,
+	Solana(crate::solana::Error),
+	NoDataShreds,
+	LastShredNotLastInSlot,
+	EntriesAreEmpty,
+	InvalidTimestamp,
+}
+
+impl From<crate::solana::Error> for Error {
+	fn from(err: crate::solana::Error) -> Self {
+		Self::Solana(err)
+	}
+}
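With `From<crate::solana::Error>` in place, code that calls into the vendored Solana module can propagate failures with `?`. A small illustration (assuming the vendored `Shred` keeps upstream's `new_from_serialized_shred` constructor, which returns that module's own error type):

```rust
fn parse_shred(bytes: &[u8]) -> Result<(), Error> {
    // `?` converts `crate::solana::Error` into `Error::Solana(_)` via the
    // `From` impl added above.
    let _shred = crate::solana::shred::Shred::new_from_serialized_shred(bytes.to_vec())?;
    Ok(())
}
```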
 
 impl fmt::Display for Error {
diff --git a/light-clients/icsxx-cf-solana/src/header.rs b/light-clients/icsxx-cf-solana/src/header.rs
index ae4ee2458..3e54c276c 100644
--- a/light-clients/icsxx-cf-solana/src/header.rs
+++ b/light-clients/icsxx-cf-solana/src/header.rs
@@ -1,2 +1,470 @@
-super::wrap!(cf_guest_upstream::Header as Header);
-super::wrap!(impl proto for Header);
+use crate::{
+	error::Error,
+	proto,
+	solana::{
+		blockstore::{get_completed_data_ranges, get_slot_entries_in_block},
+		shred::{shred_code::ShredCode, shred_data::ShredData, *},
+	},
+};
+use alloc::vec::Vec;
+use derive_more::Deref;
+use ibc::Height;
+use proto_utils::BadMessage;
+use solana_sdk::{clock::Slot, hash::Hash, signature::Signature};
+use std::{collections::BTreeSet, convert::From};
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct Header {
+	pub shreds: PreCheckedShreds,
+}
+
+impl Header {
+	pub(crate) fn slot(&self) -> Slot {
+		assert!(!self.shreds.is_empty(), "Header must contain at least one shred");
+		self.shreds[0].slot()
+	}
+
+	pub(crate) fn height(&self) -> Height {
+		Height::new(1, self.slot() as u64)
+	}
+
+	/// Calculate the block hash of the block represented by its shreds.
+	///
+	/// The block hash is the hash of the last entry formed from data shreds.
+	/// An error is returned if there are no data shreds, if the last data
+	/// shred of the slot is missing, or if the last entry cannot be formed.
+	///
+	/// TODO: since only the last entry is needed to calculate hash, consider filtering out all the
+	/// other shreds
+	pub fn calculate_hash(&self) -> Result<Hash, Error> {
+		let data_shreds = self.shreds.iter().filter(|s| s.is_data()).collect::<Vec<_>>();
+		if data_shreds.is_empty() {
+			return Err(Error::NoDataShreds);
+		}
+
+		let last_data_shred = data_shreds.last().unwrap();
+		if !last_data_shred.last_in_slot() {
+			return Err(Error::LastShredNotLastInSlot);
+		}
+
+		let completed_data_indexes = data_shreds
+			.iter()
+			.filter_map(|s| if s.data_complete() { Some(s.index()) } else { None })
+			.collect::<BTreeSet<_>>();
+		let consumed = last_data_shred.index() + 1;
+		let completed_ranges = get_completed_data_ranges(0, &completed_data_indexes, consumed);
+		let entries = get_slot_entries_in_block(self.slot(), completed_ranges, &data_shreds)?;
+		let blockhash = entries.last().map(|entry| entry.hash).ok_or(Error::EntriesAreEmpty)?;
+
+		// FIXME: verify the hash
+
+		Ok(blockhash)
+	}
+}
+
+/// An immutable array of shreds that have the following properties:
+/// 1. Is non-empty
+/// 2. Is sorted
+/// 3. Doesn't contain duplicates
+/// 4. All shreds have the same slot
+/// 5. Only contains data shreds
+#[derive(Clone, PartialEq, Eq, Debug, Deref)]
+pub struct PreCheckedShreds(Vec<Shred>);
+
+impl PreCheckedShreds {
+	/// Returns the (common) shreds' slot. Since the shreds are pre-checked, the array contains at
+	/// least one shred and all the shreds have the same slot.
+	pub(crate) fn slot(&self) -> Slot {
+		self.0[0].slot()
+	}
+}
+
+impl IntoIterator for PreCheckedShreds {
+	type Item = Shred;
+	type IntoIter = alloc::vec::IntoIter<Shred>;
+
+	fn into_iter(self) -> Self::IntoIter {
+		self.0.into_iter()
+	}
+}
+
+impl TryFrom<Vec<Shred>> for PreCheckedShreds {
+	type Error = Error;
+
+	/// Creates a new `PreCheckedShreds` from a `Vec<Shred>`. The input shreds must satisfy the
+	/// properties of the `PreCheckedShreds` type (see the `PreCheckedShreds`'s documentation).
+	fn try_from(shreds: Vec<Shred>) -> Result<Self, Self::Error> {
+		if shreds.is_empty() {
+			return Err(Error::ShardsAreEmpty);
+		}
+
+		// TODO(mina86): use array_windows once it stabilises.
+		for pair in shreds.windows(2) {
+			let prev = &pair[0];
+			let this = &pair[1];
+
+			if prev.slot() != this.slot() {
+				return Err(Error::ShredsFromDifferentSlots);
+			}
+
+			if prev.index() > this.index() {
+				return Err(Error::ShredsNotSorted);
+			}
+
+			if prev.index() == this.index() {
+				return Err(Error::ShredsContainDuplicates);
+			}
+
+			if !prev.is_data() {
+				return Err(Error::ShredIsNotData);
+			}
+		}
+
+		// The loop above only inspects `prev`, so the kind of the last shred
+		// (or of a lone shred) still has to be checked here.
+		let last = shreds.last().unwrap();
+		if !last.is_data() {
+			return Err(Error::ShredIsNotData);
+		}
+
+		Ok(Self(shreds))
+	}
+}
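All five invariants are enforced in one place, the `Vec<Shred>` boundary, so downstream code never re-validates. Construction is therefore the only fallible step:

```rust
fn into_prechecked(shreds: Vec<Shred>) -> Result<PreCheckedShreds, Error> {
    // Rejects empty, unsorted, duplicated, mixed-slot and non-data inputs.
    let shreds = PreCheckedShreds::try_from(shreds)?;
    // Safe by invariant 1: the array is non-empty, so `slot()` cannot panic.
    let _slot = shreds.slot();
    Ok(shreds)
}
```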
+
+impl From<Header> for proto::Header {
+	fn from(value: Header) -> Self {
+		Self::from(&value)
+	}
+}
+
+impl TryFrom<proto::Header> for Header {
+	type Error = BadMessage;
+
+	fn try_from(msg: proto::Header) -> Result<Self, Self::Error> {
+		Self::try_from(&msg)
+	}
+}
+
+impl TryFrom<&proto::ShredCommonHeader> for ShredCommonHeader {
+	type Error = BadMessage;
+
+	fn try_from(value: &proto::ShredCommonHeader) -> Result<Self, Self::Error> {
+		Ok(ShredCommonHeader {
+			signature: Signature::try_from(value.signature.as_ref()).map_err(|_| BadMessage)?,
+			shred_variant: value
+				.shred_variant
+				.as_ref()
+				.ok_or(BadMessage)?
+				.try_into()
+				.map_err(|_| BadMessage)?,
+			slot: value.slot,
+			index: value.index,
+			version: value.version.try_into().map_err(|_| BadMessage)?,
+			fec_set_index: value.fec_set_index,
+		})
+	}
+}
+
+impl TryFrom<&proto::CodingShredHeader> for CodingShredHeader {
+	type Error = BadMessage;
+
+	fn try_from(value: &proto::CodingShredHeader) -> Result<Self, Self::Error> {
+		Ok(CodingShredHeader {
+			num_data_shreds: value.num_data_shreds.try_into().map_err(|_| BadMessage)?,
+			num_coding_shreds: value.num_coding_shreds.try_into().map_err(|_| BadMessage)?,
+			position: value.position.try_into().map_err(|_| BadMessage)?,
+		})
+	}
+}
+
+impl TryFrom<&proto::DataShredHeader> for DataShredHeader {
+	type Error = BadMessage;
+
+	fn try_from(value: &proto::DataShredHeader) -> Result<Self, Self::Error> {
+		Ok(DataShredHeader {
+			parent_offset: value.parent_offset.try_into().map_err(|_| BadMessage)?,
+			flags: value.flags.as_ref().ok_or(BadMessage)?.try_into()?,
+			size: value.size.try_into().map_err(|_| BadMessage)?,
+		})
+	}
+}
+
+// Conversion from the protobuf `ShredVariant` representation.
+impl TryFrom<&proto::ShredVariant> for ShredVariant {
+	type Error = BadMessage;
+
+	fn try_from(value: &proto::ShredVariant) -> Result<Self, Self::Error> {
+		match &value.variant {
+			Some(proto::shred_variant::Variant::LegacyCode(_)) => Ok(ShredVariant::LegacyCode),
+			Some(proto::shred_variant::Variant::LegacyData(_)) => Ok(ShredVariant::LegacyData),
+			Some(proto::shred_variant::Variant::MerkleCode(proto_merkle_code)) =>
+				Ok(ShredVariant::MerkleCode {
+					proof_size: proto_merkle_code.proof_size as u8,
+					chained: proto_merkle_code.chained,
+					resigned: proto_merkle_code.resigned,
+				}),
+			Some(proto::shred_variant::Variant::MerkleData(proto_merkle_data)) =>
+				Ok(ShredVariant::MerkleData {
+					proof_size: proto_merkle_data.proof_size as u8,
+					chained: proto_merkle_data.chained,
+					resigned: proto_merkle_data.resigned,
+				}),
+			None => Err(BadMessage),
+		}
+	}
+}
+
+// Conversion from the protobuf `ShredFlags` representation.
+impl TryFrom<&proto::ShredFlags> for ShredFlags {
+	type Error = BadMessage;
+
+	fn try_from(value: &proto::ShredFlags) -> Result<Self, Self::Error> {
+		Ok(ShredFlags::from_bits(value.bits.try_into().map_err(|_| BadMessage)?)
+			.ok_or(BadMessage)?)
+	}
+}
+
+impl TryFrom<&proto::Header> for Header {
+	type Error = BadMessage;
+
+	fn try_from(msg: &proto::Header) -> Result<Self, Self::Error> {
+		let shreds = msg
+			.shreds
+			.iter()
+			.map(|shred| {
+				Ok(match &shred.message {
+					Some(proto::shred::Message::ShredCode(proto_shred_code)) => {
+						match &proto_shred_code.message {
+							Some(proto::shred_code::Message::LegacyShredCode(
+								proto_legacy_shred_code,
+							)) => {
+								// Convert LegacyShredCode
+								let common_header: ShredCommonHeader = proto_legacy_shred_code
+									.common_header
+									.as_ref()
+									.ok_or(BadMessage)?
+									.try_into()?;
+								let coding_header: CodingShredHeader = proto_legacy_shred_code
+									.coding_header
+									.as_ref()
+									.ok_or(BadMessage)?
+ .try_into()?; + let payload = proto_legacy_shred_code.payload.clone(); + Shred::ShredCode(ShredCode::Legacy(legacy::ShredCode { + common_header, + coding_header, + payload, + })) + }, + Some(proto::shred_code::Message::MerkleShredCode( + proto_merkle_shred_code, + )) => { + // Convert MerkleShredCode + let common_header: ShredCommonHeader = proto_merkle_shred_code + .common_header + .as_ref() + .ok_or(BadMessage)? + .try_into()?; + let coding_header: CodingShredHeader = proto_merkle_shred_code + .coding_header + .as_ref() + .ok_or(BadMessage)? + .try_into()?; + let payload = proto_merkle_shred_code.payload.clone(); + Shred::ShredCode(ShredCode::Merkle(merkle::ShredCode { + common_header, + coding_header, + payload, + })) + }, + None => return Err(BadMessage), + } + }, + Some(proto::shred::Message::ShredData(proto_shred_data)) => { + match &proto_shred_data.message { + Some(proto::shred_data::Message::LegacyShredData( + proto_legacy_shred_data, + )) => { + // Convert LegacyShredData + let common_header: ShredCommonHeader = proto_legacy_shred_data + .common_header + .as_ref() + .ok_or(BadMessage)? + .try_into()?; + let data_header: DataShredHeader = proto_legacy_shred_data + .data_header + .as_ref() + .ok_or(BadMessage)? + .try_into()?; + let payload = proto_legacy_shred_data.payload.clone(); + Shred::ShredData(ShredData::Legacy(legacy::ShredData { + common_header, + data_header, + payload, + })) + }, + Some(proto::shred_data::Message::MerkleShredData( + proto_merkle_shred_data, + )) => { + // Convert MerkleShredData + let common_header: ShredCommonHeader = proto_merkle_shred_data + .common_header + .as_ref() + .ok_or(BadMessage)? + .try_into()?; + let data_header: DataShredHeader = proto_merkle_shred_data + .data_header + .as_ref() + .ok_or(BadMessage)? 
+									.try_into()?;
+								let payload = proto_merkle_shred_data.payload.clone();
+								Shred::ShredData(ShredData::Merkle(merkle::ShredData {
+									common_header,
+									data_header,
+									payload,
+								}))
+							},
+							None => return Err(BadMessage),
+						}
+					},
+					None => return Err(BadMessage),
+				})
+			})
+			.collect::<Result<Vec<_>, _>>()?;
+
+		let shreds = PreCheckedShreds::try_from(shreds).map_err(|_| BadMessage)?;
+		Ok(Header { shreds })
+	}
+}
+
+impl From<&ShredCommonHeader> for proto::ShredCommonHeader {
+	fn from(header: &ShredCommonHeader) -> Self {
+		proto::ShredCommonHeader {
+			signature: header.signature.as_ref().into(),
+			shred_variant: Some((&header.shred_variant).into()),
+			slot: header.slot,
+			index: header.index,
+			version: header.version as _,
+			fec_set_index: header.fec_set_index,
+		}
+	}
+}
+
+impl From<&CodingShredHeader> for proto::CodingShredHeader {
+	fn from(header: &CodingShredHeader) -> Self {
+		proto::CodingShredHeader {
+			num_data_shreds: header.num_data_shreds as _,
+			num_coding_shreds: header.num_coding_shreds as _,
+			position: header.position as _,
+		}
+	}
+}
+
+impl From<&DataShredHeader> for proto::DataShredHeader {
+	fn from(header: &DataShredHeader) -> Self {
+		proto::DataShredHeader {
+			parent_offset: header.parent_offset as _,
+			flags: Some((&header.flags).into()),
+			size: header.size as _,
+		}
+	}
+}
+
+impl From<&ShredVariant> for proto::ShredVariant {
+	fn from(variant: &ShredVariant) -> Self {
+		match variant {
+			ShredVariant::LegacyCode => proto::ShredVariant {
+				variant: Some(proto::shred_variant::Variant::LegacyCode(proto::LegacyCode {})),
+			},
+			ShredVariant::LegacyData => proto::ShredVariant {
+				variant: Some(proto::shred_variant::Variant::LegacyData(proto::LegacyData {})),
+			},
+			ShredVariant::MerkleCode { proof_size, chained, resigned } => proto::ShredVariant {
+				variant: Some(proto::shred_variant::Variant::MerkleCode(proto::MerkleCode {
+					proof_size: *proof_size as u32,
+					chained: *chained,
+					resigned: *resigned,
+				})),
+			},
+			ShredVariant::MerkleData { proof_size, chained, resigned } => proto::ShredVariant {
+				variant: Some(proto::shred_variant::Variant::MerkleData(proto::MerkleData {
+					proof_size: *proof_size as u32,
+					chained: *chained,
+					resigned: *resigned,
+				})),
+			},
+		}
+	}
+}
+
+impl From<&ShredFlags> for proto::ShredFlags {
+	fn from(flags: &ShredFlags) -> Self {
+		proto::ShredFlags { bits: flags.bits() as _ }
+	}
+}
+
+impl From<&Header> for proto::Header {
+	fn from(header: &Header) -> Self {
+		let proto_shreds = header
+			.shreds
+			.iter()
+			.map(|shred| match shred {
+				Shred::ShredCode(shred_code) => {
+					let proto_shred_code = match shred_code {
+						ShredCode::Legacy(legacy_shred_code) =>
+							proto::shred_code::Message::LegacyShredCode(proto::LegacyShredCode {
+								common_header: Some((&legacy_shred_code.common_header).into()),
+								coding_header: Some((&legacy_shred_code.coding_header).into()),
+								payload: legacy_shred_code.payload.clone(),
+							}),
+						ShredCode::Merkle(merkle_shred_code) =>
+							proto::shred_code::Message::MerkleShredCode(proto::MerkleShredCode {
+								common_header: Some((&merkle_shred_code.common_header).into()),
+								coding_header: Some((&merkle_shred_code.coding_header).into()),
+								payload: merkle_shred_code.payload.clone(),
+							}),
+					};
+
+					proto::Shred {
+						message: Some(proto::shred::Message::ShredCode(proto::ShredCode {
+							message: Some(proto_shred_code),
+						})),
+					}
+				},
+				Shred::ShredData(shred_data) => {
+					let proto_shred_data = match shred_data {
+						ShredData::Legacy(legacy_shred_data) =>
+							proto::shred_data::Message::LegacyShredData(proto::LegacyShredData {
+								common_header: Some((&legacy_shred_data.common_header).into()),
+								data_header: Some((&legacy_shred_data.data_header).into()),
+								payload: legacy_shred_data.payload.clone(),
+							}),
+						ShredData::Merkle(merkle_shred_data) =>
+							proto::shred_data::Message::MerkleShredData(proto::MerkleShredData {
+								common_header: Some((&merkle_shred_data.common_header).into()),
+								data_header: Some((&merkle_shred_data.data_header).into()),
+								payload: merkle_shred_data.payload.clone(),
+							}),
+					};
+
+					proto::Shred {
+						message: Some(proto::shred::Message::ShredData(proto::ShredData {
+							message: Some(proto_shred_data),
+						})),
+					}
+				},
+			})
+			.collect();
+
+		proto::Header { shreds: proto_shreds }
+	}
+}
+
+proto_utils::define_wrapper! {
+	proto: crate::proto::Header,
+	wrapper: Header,
+}
+
+// super::impls!( Header);
+// super::impls!(impl proto for Header);
diff --git a/light-clients/icsxx-cf-solana/src/lib.rs b/light-clients/icsxx-cf-solana/src/lib.rs
index 56b4fc564..82fb64fb3 100644
--- a/light-clients/icsxx-cf-solana/src/lib.rs
+++ b/light-clients/icsxx-cf-solana/src/lib.rs
@@ -13,8 +13,8 @@ pub mod error;
 mod header;
 mod message;
 mod misbehaviour;
-pub mod proof;
 pub mod proto;
+mod solana;
 
 pub use client::ClientState;
 pub use consensus::ConsensusState;
@@ -25,9 +25,15 @@ pub use misbehaviour::Misbehaviour;
 use ibc::core::ics02_client::error::Error as ClientError;
 
 /// Client type of the guest blockchain’s light client.
-pub const CLIENT_TYPE: &str = "cf-guest";
+pub const CLIENT_TYPE: &str = "cf-solana";
 
-pub use crate::proto::{BadMessage, DecodeError};
+pub const CF_SOLANA_CLIENT_MESSAGE_TYPE_URL: &'static str = proto::ClientMessage::IBC_TYPE_URL;
+pub const CF_SOLANA_CLIENT_STATE_TYPE_URL: &'static str = proto::ClientState::IBC_TYPE_URL;
+pub const CF_SOLANA_CONSENSUS_STATE_TYPE_URL: &'static str = proto::ConsensusState::IBC_TYPE_URL;
+pub const CF_SOLANA_HEADER_TYPE_URL: &'static str = proto::Header::IBC_TYPE_URL;
+pub const CF_SOLANA_MISBEHAVIOUR_TYPE_URL: &'static str = proto::Misbehaviour::IBC_TYPE_URL;
+
+pub use crate::proto::DecodeError;
 
 impl From<DecodeError> for ClientError {
 	fn from(err: DecodeError) -> Self {
@@ -35,12 +41,6 @@ impl From<DecodeError> for ClientError {
 	}
 }
 
-impl From<BadMessage> for ClientError {
-	fn from(_: BadMessage) -> Self {
-		ClientError::implementation_specific("BadMessage".to_string())
-	}
-}
-
 /// Returns digest of the value.
 ///
 /// This is used, among other places, as packet commitment.
@@ -65,6 +65,173 @@ pub fn digest_with_client_id(
 	lib::hash::CryptoHash::digestv(&[client_id.as_bytes(), b"\0", value])
 }
 
+#[macro_export]
+macro_rules! impls {
+	($Outer:ident) => {
+		impl core::fmt::Debug for $Outer {
+			fn fmt(&self, fmtr: &mut core::fmt::Formatter) -> core::fmt::Result {
+				self.0.fmt(fmtr)
+			}
+		}
+
+		impl From<$Outer> for ibc_proto::google::protobuf::Any {
+			fn from(msg: $Outer) -> Self {
+				Self::from(&msg)
+			}
+		}
+
+		impl From<&$Outer> for ibc_proto::google::protobuf::Any {
+			fn from(msg: &$Outer) -> Self {
+				let any = cf_guest_upstream::proto::Any::from(&msg.0);
+				Self { type_url: any.type_url, value: any.value }
+			}
+		}
+
+		impl TryFrom<ibc_proto::google::protobuf::Any> for $Outer {
+			type Error = $crate::DecodeError;
+			fn try_from(any: ibc_proto::google::protobuf::Any) -> Result<Self, Self::Error> {
+				Self::try_from(&any)
+			}
+		}
+
+		impl TryFrom<&ibc_proto::google::protobuf::Any> for $Outer {
+			type Error = $crate::DecodeError;
+
+			fn try_from(any: &ibc_proto::google::protobuf::Any) -> Result<Self, Self::Error> {
+				Ok(Self(cf_guest_upstream::proto::AnyConvert::try_from_any(
+					&any.type_url,
+					&any.value,
+				)?))
+			}
+		}
+	};
+
+	(<PK> $Outer:ident) => {
+		impl<PK: guestchain::PubKey> From<$Outer<PK>> for ibc_proto::google::protobuf::Any {
+			fn from(msg: $Outer<PK>) -> Self {
+				Self::from(&msg)
+			}
+		}
+
+		impl<PK: guestchain::PubKey> From<&$Outer<PK>> for ibc_proto::google::protobuf::Any {
+			fn from(msg: &$Outer<PK>) -> Self {
+				let any = cf_guest_upstream::proto::Any::from(&msg.0);
+				Self { type_url: any.type_url, value: any.value }
+			}
+		}
+
+		impl<PK: guestchain::PubKey> TryFrom<ibc_proto::google::protobuf::Any> for $Outer<PK> {
+			type Error = $crate::DecodeError;
+			fn try_from(any: ibc_proto::google::protobuf::Any) -> Result<Self, Self::Error> {
+				Self::try_from(&any)
+			}
+		}
+
+		impl<PK: guestchain::PubKey> TryFrom<&ibc_proto::google::protobuf::Any> for $Outer<PK> {
+			type Error = $crate::DecodeError;
+			fn try_from(any: &ibc_proto::google::protobuf::Any) -> Result<Self, Self::Error> {
+				Ok(Self(cf_guest_upstream::proto::AnyConvert::try_from_any(
+					&any.type_url,
+					&any.value,
+				)?))
+			}
+		}
+	};
+
+	(impl Default for $Outer:ident) => {
+		impl Default for $Outer {
+			fn default() -> Self {
+				Self(Default::default())
+			}
+		}
+	};
+
+	(impl<PK> Default for $Outer:ident) => {
+		impl<PK: guestchain::PubKey> Default for $Outer<PK> {
+			fn default() -> Self {
+				Self(Default::default())
+			}
+		}
+	};
+
+	(impl Eq for $Outer:ident) => {
+		impl core::cmp::PartialEq for $Outer {
+			fn eq(&self, other: &Self) -> bool {
+				self.0.eq(&other.0)
+			}
+		}
+		impl core::cmp::Eq for $Outer {}
+	};
+
+	(impl proto for $Type:ident) => {
+		impl $crate::proto::$Type {
+			pub const IBC_TYPE_URL: &'static str = cf_guest_upstream::proto::$Type::IBC_TYPE_URL;
+		}
+
+		impl From<$Type> for $crate::proto::$Type {
+			fn from(msg: $Type) -> Self {
+				Self(cf_guest_upstream::proto::$Type::from(&msg.0))
+			}
+		}
+
+		impl From<&$Type> for $crate::proto::$Type {
+			fn from(msg: &$Type) -> Self {
+				Self(cf_guest_upstream::proto::$Type::from(&msg.0))
+			}
+		}
+
+		impl TryFrom<$crate::proto::$Type> for $Type {
+			type Error = $crate::proto::BadMessage;
+			fn try_from(msg: $crate::proto::$Type) -> Result<Self, Self::Error> {
+				Self::try_from(&msg)
+			}
+		}
+
+		impl TryFrom<&$crate::proto::$Type> for $Type {
+			type Error = $crate::proto::BadMessage;
+			fn try_from(msg: &$crate::proto::$Type) -> Result<Self, Self::Error> {
+				Ok(Self(cf_guest_upstream::$Type::try_from(&msg.0)?))
+			}
+		}
+
+		impl ibc::protobuf::Protobuf<$crate::proto::$Type> for $Type {}
+	};
+
+	(impl<PK> proto for $Type:ident) => {
+		impl $crate::proto::$Type {
+			pub const IBC_TYPE_URL: &'static str = cf_guest_upstream::proto::$Type::IBC_TYPE_URL;
+		}
+
+		impl<PK: guestchain::PubKey> From<$Type<PK>> for $crate::proto::$Type {
+			fn from(msg: $Type<PK>) -> Self {
+				Self(cf_guest_upstream::proto::$Type::from(&msg.0))
+			}
+		}
+
+		impl<PK: guestchain::PubKey> From<&$Type<PK>> for $crate::proto::$Type {
+			fn from(msg: &$Type<PK>) -> Self {
+				Self(cf_guest_upstream::proto::$Type::from(&msg.0))
+			}
+		}
+
+		impl<PK: guestchain::PubKey> TryFrom<$crate::proto::$Type> for $Type<PK> {
+			type Error = $crate::proto::BadMessage;
+			fn try_from(msg: $crate::proto::$Type) -> Result<Self, Self::Error> {
+				Self::try_from(&msg)
+			}
+		}
+
+		impl<PK: guestchain::PubKey> TryFrom<&$crate::proto::$Type> for $Type<PK> {
+			type Error = $crate::proto::BadMessage;
+			fn try_from(msg: &$crate::proto::$Type) -> Result<Self, Self::Error> {
+				Ok(Self(cf_guest_upstream::$Type::try_from(&msg.0)?))
+			}
+		}
+
+		impl<PK: guestchain::PubKey> ibc::protobuf::Protobuf<$crate::proto::$Type> for $Type<PK> {}
+	};
+}
+
 macro_rules! wrap {
 	($($Inner:ident)::* as $Outer:ident) => {
 		#[derive(Clone, derive_more::From, derive_more::Into)]
diff --git a/light-clients/icsxx-cf-solana/src/message.rs b/light-clients/icsxx-cf-solana/src/message.rs
index ad3a89487..57eeb04c2 100644
--- a/light-clients/icsxx-cf-solana/src/message.rs
+++ b/light-clients/icsxx-cf-solana/src/message.rs
@@ -1,52 +1,112 @@
-use guestchain::PubKey;
+use crate::{
+	proto,
+	proto::{client_message, client_message::Message},
+	Header, Misbehaviour,
+};
 use prost::Message as _;
+use proto_utils::{BadMessage, DecodeError};
 
-use crate::proto;
+#[derive(Clone, PartialEq, Eq, Debug, derive_more::From, derive_more::TryInto)]
+// For the time being allow large enum variants.  Header is short of 400 bytes
+// and Misbehaviour is short of 700.  We may want to box the values if we run
+// into stack size issues.
+#[allow(clippy::large_enum_variant)]
+pub enum ClientMessage {
+	Header(Header),
+	Misbehaviour(Misbehaviour),
+}
 
-super::wrap!(cf_guest_upstream::ClientMessage as ClientMessage);
-super::wrap!(impl proto for ClientMessage);
+impl ibc::core::ics02_client::client_message::ClientMessage for ClientMessage {
+	fn encode_to_vec(&self) -> Result<Vec<u8>, ibc::protobuf::Error> {
+		Ok(proto::ClientMessage::from(self).encode_to_vec())
+	}
+}
 
-impl ClientMessage {
-	pub fn maybe_header_height(&self) -> Option<ibc::Height> {
-		if let cf_guest_upstream::ClientMessage::Header(hdr) = &self.0 {
-			let height = hdr.block_header.block_height;
-			Some(ibc::Height::new(1, height.into()))
-		} else {
-			None
+impl From<ClientMessage> for Message {
+	fn from(msg: ClientMessage) -> Self {
+		Self::from(&msg)
+	}
+}
+
+impl From<&ClientMessage> for Message {
+	fn from(msg: &ClientMessage) -> Self {
+		match msg {
+			ClientMessage::Header(msg) => Self::Header(msg.into()),
+			ClientMessage::Misbehaviour(msg) => Self::Misbehaviour(msg.into()),
 		}
 	}
 }
 
-impl ibc::core::ics02_client::client_message::ClientMessage for ClientMessage<PK>
-where
-	PK: PubKey + Send + Sync,
-	PK::Signature: Send + Sync,
-{
-	fn encode_to_vec(&self) -> Result<Vec<u8>, ibc::protobuf::Error> {
-		Ok(proto::ClientMessage::from(self).encode_to_vec())
+impl TryFrom<Message> for ClientMessage {
+	type Error = BadMessage;
+	fn try_from(msg: Message) -> Result<Self, Self::Error> {
+		Self::try_from(&msg)
 	}
 }
 
-impl From<cf_guest_upstream::Header<PK>> for ClientMessage<PK> {
-	fn from(hdr: cf_guest_upstream::Header<PK>) -> Self {
-		Self(cf_guest_upstream::ClientMessage::Header(hdr))
+impl TryFrom<&Message> for ClientMessage {
+	type Error = BadMessage;
+	fn try_from(msg: &Message) -> Result<Self, Self::Error> {
+		match msg {
+			Message::Header(msg) => msg.try_into().map(Self::Header),
+			Message::Misbehaviour(mb) => mb.try_into().map(Self::Misbehaviour),
+		}
 	}
 }
 
-impl From<crate::Header<PK>> for ClientMessage<PK> {
-	fn from(hdr: crate::Header<PK>) -> Self {
-		Self(cf_guest_upstream::ClientMessage::Header(hdr.0))
+impl From<ClientMessage> for proto::ClientMessage {
+	fn from(msg: ClientMessage) -> Self {
+		Self::from(&msg)
 	}
 }
 
-impl From<cf_guest_upstream::Misbehaviour<PK>> for ClientMessage<PK> {
-	fn from(msg: cf_guest_upstream::Misbehaviour<PK>) -> Self {
-		Self(cf_guest_upstream::ClientMessage::Misbehaviour(msg))
+impl From<&ClientMessage> for proto::ClientMessage {
+	fn from(msg: &ClientMessage) -> Self {
+		let message = Some(match msg {
+			ClientMessage::Header(msg) => client_message::Message::Header(msg.into()),
+			ClientMessage::Misbehaviour(msg) => client_message::Message::Misbehaviour(msg.into()),
+		});
+		Self { message }
 	}
 }
 
-impl From<crate::Misbehaviour<PK>> for ClientMessage<PK> {
-	fn from(msg: crate::Misbehaviour<PK>) -> Self {
-		Self(cf_guest_upstream::ClientMessage::Misbehaviour(msg.0))
+impl TryFrom<proto::ClientMessage> for ClientMessage {
+	type Error = BadMessage;
+	fn try_from(msg: proto::ClientMessage) -> Result<Self, Self::Error> {
+		Self::try_from(&msg)
+	}
+}
+
+impl TryFrom<&proto::ClientMessage> for ClientMessage {
+	type Error = BadMessage;
+	fn try_from(msg: &proto::ClientMessage) -> Result<Self, Self::Error> {
+		msg.message.as_ref().ok_or(BadMessage).and_then(Self::try_from)
+	}
+}
+
+proto_utils::define_wrapper! {
+	proto: proto::ClientMessage,
+	wrapper: ClientMessage,
+	custom_any
+}
+
+impl proto_utils::AnyConvert for ClientMessage {
+	fn to_any(&self) -> (&'static str, alloc::vec::Vec<u8>) {
+		match self {
+			Self::Header(msg) => msg.to_any(),
+			Self::Misbehaviour(msg) => msg.to_any(),
+		}
+	}
+
+	fn try_from_any(type_url: &str, value: &[u8]) -> Result<Self, DecodeError> {
+		if type_url.ends_with(proto::ClientMessage::IBC_TYPE_URL) {
+			Self::decode(value)
+		} else if type_url.ends_with(proto::Header::IBC_TYPE_URL) {
+			Header::decode(value).map(Self::Header)
+		} else if type_url.ends_with(proto::Misbehaviour::IBC_TYPE_URL) {
+			Misbehaviour::decode(value).map(Self::Misbehaviour)
+		} else {
+			Err(DecodeError::BadType)
+		}
	}
}
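Since `try_from_any` matches on the type-URL suffix, a bare `Header`, a bare `Misbehaviour` and a full `ClientMessage` envelope all decode into the same enum. A sketch of the dispatch from an IBC `Any`:

```rust
use ibc_proto::google::protobuf::Any;
use proto_utils::AnyConvert;

fn decode_client_message(any: &Any) -> Result<ClientMessage, DecodeError> {
    // Accepts `…cf_solana.v1.ClientMessage`, `…Header` and `…Misbehaviour`
    // type URLs alike, per the suffix matching above.
    ClientMessage::try_from_any(&any.type_url, &any.value)
}
```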
diff --git a/light-clients/icsxx-cf-solana/src/misbehaviour.rs b/light-clients/icsxx-cf-solana/src/misbehaviour.rs
index af592f747..cb5945b9c 100644
--- a/light-clients/icsxx-cf-solana/src/misbehaviour.rs
+++ b/light-clients/icsxx-cf-solana/src/misbehaviour.rs
@@ -1,2 +1,41 @@
-super::wrap!(cf_guest_upstream::Misbehaviour as Misbehaviour);
-super::wrap!(impl proto for Misbehaviour);
+// super::wrap!(cf_guest_upstream::Misbehaviour as Misbehaviour);
+// super::wrap!(impl proto for Misbehaviour);
+
+use crate::proto;
+use proto_utils::BadMessage;
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct Misbehaviour {
+	_never: std::convert::Infallible,
+}
+
+impl From<Misbehaviour> for proto::Misbehaviour {
+	fn from(msg: Misbehaviour) -> Self {
+		Self::from(&msg)
+	}
+}
+
+impl From<&Misbehaviour> for proto::Misbehaviour {
+	fn from(_msg: &Misbehaviour) -> Self {
+		todo!("Misbehaviour::from")
+	}
+}
+
+impl TryFrom<proto::Misbehaviour> for Misbehaviour {
+	type Error = BadMessage;
+	fn try_from(msg: proto::Misbehaviour) -> Result<Self, Self::Error> {
+		Self::try_from(&msg)
+	}
+}
+
+impl TryFrom<&proto::Misbehaviour> for Misbehaviour {
+	type Error = BadMessage;
+	fn try_from(_msg: &proto::Misbehaviour) -> Result<Self, Self::Error> {
+		todo!("Misbehaviour::try_from")
+	}
+}
+
+proto_utils::define_wrapper! {
+	proto: proto::Misbehaviour,
+	wrapper: Misbehaviour,
+}
diff --git a/light-clients/icsxx-cf-solana/src/proof.rs b/light-clients/icsxx-cf-solana/src/proof.rs
deleted file mode 100644
index 4966ce4c6..000000000
--- a/light-clients/icsxx-cf-solana/src/proof.rs
+++ /dev/null
@@ -1,195 +0,0 @@
-use core::str::FromStr;
-
-use guestchain::BlockHeader;
-use ibc_core_host_types::path::{
-	AckPath, ChannelEndPath, ClientConnectionPath, CommitmentPath, ConnectionPath, PortPath,
-	ReceiptPath, SeqAckPath, SeqRecvPath, SeqSendPath,
-};
-
-mod ibc {
-	pub use ibc::core::{
-		ics02_client::error::Error as ClientError,
-		ics04_channel::packet::Sequence,
-		ics23_commitment::commitment::{CommitmentPrefix, CommitmentProofBytes, CommitmentRoot},
-		ics24_host::{
-			identifier,
-			identifier::{ChannelId, ClientId, ConnectionId, PortId},
-			path,
-		},
-	};
-}
-
-pub use cf_guest_upstream::proof::{GenerateError, IbcProof, VerifyError};
-
-/// Generates a proof for given path.
-///
-/// `block_header` is header whose hash will be the commitment root.  It’s
-/// state root must correspond to `trie`’s root.  `path` specifies IBC path
-/// of the value that needs proof.
-///
-/// # Proof format
-///
-/// In most cases, proof is Borsh-serialised `(guestchain::BlockHeader,
-/// sealable_trie::proof::Proof)` pair.  The header at the front is necessary to
-/// determine state root (recall that `root` is the block hash and not state
-/// root).
-///
-/// However, if `path` is one of `SeqSend`, `SeqRecv` or `SeqAck` than proof
-/// further contain two big-endian encoded `u64` numbers holding the other
-/// two sequence numbers.
-///
-/// For example, if `path` is `SeqRecv`, the `proof` must at the end include
-/// send sequence number and ack sequence number.  For example, if next send
-/// sequence is `7`, next ack sequence is `5` and path is `SeqRecv` the
-/// proof will end with `be(7) || be(5)` (where `be` denotes encoding 64-bit
-/// number as big endian).
-///
-/// This addition is necessary because sequence numbers are stored together
-/// within a single trie value.  For example, proving the next receive
-/// sequence is `4` requires proving `be(7), be(4), be(5), be(0)].  For
-/// verifier to know what value it checks, it needs to be provided all of
-/// the sequence numbers.
-///
-/// (Note that Borsh uses little endian to encode integers so the sequence
-/// numbers cannot be simply borsh deserialised.)
-pub fn generate(
-	block_header: &BlockHeader,
-	trie: &sealable_trie::Trie,
-	path: ibc::path::Path,
-) -> Result<IbcProof, GenerateError> {
-	let path = convert_old_path_to_new(path);
-	cf_guest_upstream::proof::generate(block_header, trie, path)
-}
-
-/// Verifies a proof for given entry or lack of entry.
-///
-/// `prefix` must be empty, `proof` and `root` must follow format described in
-/// [`generate`] function.  `path` indicates IBC path the proof is for and
-/// `value` determines value or lack thereof expected at the path.
-///
-/// # Value hash
-///
-/// Since sealable trie doesn’t store values but only hashes, when verifying
-/// membership proofs the value needs to be converted into a hash.  There are
-/// three cases:
-///
-/// 1. If `path` includes client id, the hash of the value is calculated with the client id mixed
-///    in; see [`super::digest_with_client_id`] function.
-///
-/// 2. If `path` is `SeqSend`, `SeqRecv` or `SeqAck`, the `value` must be
-///    `google.protobuf.UInt64Value` protobuf and hash is calculated as concatenation of the three
-///    sequence numbers as described in [`generate`].
-///
-/// 3.
Otherwise, the value is simply hashed. -pub fn verify( - prefix: &ibc::CommitmentPrefix, - proof: &ibc::CommitmentProofBytes, - root: &ibc::CommitmentRoot, - path: ibc::path::Path, - value: Option<&[u8]>, -) -> Result<(), VerifyError> { - verify_bytes(prefix.as_bytes(), proof.as_bytes(), root.as_bytes(), path, value) -} - -/// Verifies a proof for given entry or lack of entry. -/// -/// Like [`verify`] but takes slice arguments rather than IBC types. -pub fn verify_bytes( - prefix: &[u8], - proof: &[u8], - root: &[u8], - path: ibc::path::Path, - value: Option<&[u8]>, -) -> Result<(), VerifyError> { - cf_guest_upstream::proof::verify(prefix, proof, root, convert_old_path_to_new(path), value) -} - -fn convert_old_path_to_new(path: ibc::path::Path) -> ibc_core_host_types::path::Path { - match path { - ::ibc::core::ics24_host::Path::ClientType(_) => panic!("Not supported"), - ::ibc::core::ics24_host::Path::ClientState(e) => - ibc_core_host_types::path::Path::ClientState( - ibc_core_host_types::path::ClientStatePath( - ibc_core_host_types::identifiers::ClientId::from_str(e.0.as_str()).unwrap(), - ), - ), - ::ibc::core::ics24_host::Path::ClientConsensusState(e) => - ibc_core_host_types::path::Path::ClientConsensusState( - ibc_core_host_types::path::ClientConsensusStatePath { - client_id: ibc_core_host_types::identifiers::ClientId::from_str( - e.client_id.as_str(), - ) - .unwrap(), - revision_number: e.epoch, - revision_height: e.height, - }, - ), - ::ibc::core::ics24_host::Path::ClientConnections(e) => - ibc_core_host_types::path::Path::ClientConnection(ClientConnectionPath( - ibc_core_host_types::identifiers::ClientId::from_str(e.0.as_str()).unwrap(), - )), - ::ibc::core::ics24_host::Path::Connections(e) => - ibc_core_host_types::path::Path::Connection(ConnectionPath( - ibc_core_host_types::identifiers::ConnectionId::from_str(e.0.as_str()).unwrap(), - )), - ::ibc::core::ics24_host::Path::Ports(e) => ibc_core_host_types::path::Path::Ports( - PortPath(ibc_core_host_types::identifiers::PortId::from_str(e.0.as_str()).unwrap()), - ), - ::ibc::core::ics24_host::Path::ChannelEnds(e) => - ibc_core_host_types::path::Path::ChannelEnd(ChannelEndPath( - ibc_core_host_types::identifiers::PortId::from_str(e.0.as_str()).unwrap(), - ibc_core_host_types::identifiers::ChannelId::new(e.1.sequence()), - )), - ::ibc::core::ics24_host::Path::SeqSends(e) => - ibc_core_host_types::path::Path::SeqSend(SeqSendPath( - ibc_core_host_types::identifiers::PortId::from_str(e.0.as_str()).unwrap(), - ibc_core_host_types::identifiers::ChannelId::new(e.1.sequence()), - )), - ::ibc::core::ics24_host::Path::SeqRecvs(e) => - ibc_core_host_types::path::Path::SeqRecv(SeqRecvPath( - ibc_core_host_types::identifiers::PortId::from_str(e.0.as_str()).unwrap(), - ibc_core_host_types::identifiers::ChannelId::new(e.1.sequence()), - )), - ::ibc::core::ics24_host::Path::SeqAcks(e) => - ibc_core_host_types::path::Path::SeqAck(SeqAckPath( - ibc_core_host_types::identifiers::PortId::from_str(e.0.as_str()).unwrap(), - ibc_core_host_types::identifiers::ChannelId::new(e.1.sequence()), - )), - ::ibc::core::ics24_host::Path::Commitments(e) => - ibc_core_host_types::path::Path::Commitment(CommitmentPath { - port_id: ibc_core_host_types::identifiers::PortId::from_str(e.port_id.as_str()) - .unwrap(), - channel_id: ibc_core_host_types::identifiers::ChannelId::new( - e.channel_id.sequence(), - ), - sequence: u64::from(e.sequence.0).into(), - }), - ::ibc::core::ics24_host::Path::Acks(e) => ibc_core_host_types::path::Path::Ack(AckPath { - port_id: 
ibc_core_host_types::identifiers::PortId::from_str(e.port_id.as_str()) - .unwrap(), - channel_id: ibc_core_host_types::identifiers::ChannelId::new(e.channel_id.sequence()), - sequence: u64::from(e.sequence.0).into(), - }), - ::ibc::core::ics24_host::Path::Receipts(e) => - ibc_core_host_types::path::Path::Receipt(ReceiptPath { - port_id: ibc_core_host_types::identifiers::PortId::from_str(e.port_id.as_str()) - .unwrap(), - channel_id: ibc_core_host_types::identifiers::ChannelId::new( - e.channel_id.sequence(), - ), - sequence: u64::from(e.sequence.0).into(), - }), - ::ibc::core::ics24_host::Path::Upgrade(path) => { - use ::ibc::core::ics24_host::ClientUpgradePath; - use ibc_core_host_types::path::UpgradeClientPath; - match path { - ClientUpgradePath::UpgradedClientState(height) => - UpgradeClientPath::UpgradedClientState(height), - ClientUpgradePath::UpgradedClientConsensusState(height) => - UpgradeClientPath::UpgradedClientConsensusState(height), - } - .into() - }, - ::ibc::core::ics24_host::Path::Outside(e) => panic!("Not supported {:?}", e), - } -} diff --git a/light-clients/icsxx-cf-solana/src/proto.rs b/light-clients/icsxx-cf-solana/src/proto.rs index 01e5cffca..040e2fd53 100644 --- a/light-clients/icsxx-cf-solana/src/proto.rs +++ b/light-clients/icsxx-cf-solana/src/proto.rs @@ -1,52 +1,46 @@ use alloc::string::ToString; -macro_rules! import_proto { - ($Msg:ident) => { - $crate::wrap!(cf_guest_upstream::proto::$Msg as $Msg); - $crate::wrap!(impl Default for $Msg); - - impl prost::Message for $Msg { - fn encode_raw(&self, buf: &mut B) { - prost_12::Message::encode_raw(&self.0, buf) - } - - fn merge_field( - &mut self, - tag: u32, - wire_type: prost::encoding::WireType, - buf: &mut B, - _ctx: prost::encoding::DecodeContext, - ) -> Result<(), prost::DecodeError> { - // SAFETY: The types are identical in prost 0.11 and prost.12. - let wire_type = unsafe { - core::mem::transmute(wire_type as u8) - }; - prost_12::Message::merge_field(&mut self.0, tag, wire_type, buf, Default::default()) - .map_err(|err| { - // SAFETY: The types are identical in prost 0.11 and prost.12. - unsafe { - core::mem::transmute(err) - } - }) - } - - fn encoded_len(&self) -> usize { - prost_12::Message::encoded_len(&self.0) - } - - fn clear(&mut self) { - prost_12::Message::clear(&mut self.0) - } +mod pb { + include!(concat!(env!("OUT_DIR"), "/messages.rs")); +} + +pub use pb::lightclients::cf_solana::v1::*; + +macro_rules! define_proto { + ($Msg:ident; $test:ident; $test_object:expr) => { + proto_utils::define_message! 
 {
+			pub use pb::lightclients::cf_solana::v1::$Msg as $Msg;
+			$test $test_object;
+		}
+	};
+}
 
-import_proto!(ClientMessage);
-import_proto!(ClientState);
-import_proto!(ConsensusState);
-import_proto!(Header);
-import_proto!(Misbehaviour);
-import_proto!(Signature);
+define_proto!(ClientState; test_client_state; Self {
+	latest_height: 8,
+	is_frozen: false,
+	current_leader: Default::default(),
+	genesis_time: 0,
+	trusting_period_ns: 30 * 24 * 3600 * 1_000_000_000,
+	slot_duration: 0,
+});
+
+define_proto!(ConsensusState; test_consensus_state; {
+	let block_hash = lib::hash::CryptoHash::test(42).to_vec();
+	Self { block_hash, timestamp_ns: 1 }
+});
+
+define_proto!(ClientMessage; test_client_message; Header::test().into());
+
+define_proto!(Header; test_header; todo!("test_header"));
+
+define_proto!(Misbehaviour; test_misbehaviour; todo!("test_misbehaviour"));
+
+// import_proto!(ClientMessage);
+// import_proto!(ClientState);
+// import_proto!(ConsensusState);
+// import_proto!(Header);
+// import_proto!(Misbehaviour);
+// import_proto!(Signature);
 
 /// Error during decoding of a protocol message.
 #[derive(Clone, PartialEq, Eq, derive_more::From)]
@@ -77,26 +71,6 @@ impl From for DecodeError {
 	}
 }
 
-/// Error during validation of a protocol message.
-///
-/// Typing in protocol messages is less descriptive than in Rust.  It’s possible
-/// to represent state in the protocol message which doesn’t correspond to
-/// a valid state.
-#[derive(Copy, Clone, Debug, PartialEq, Eq)]
-pub struct BadMessage;
-
-impl From<cf_guest_upstream::BadMessage> for BadMessage {
-	fn from(_: cf_guest_upstream::BadMessage) -> Self {
-		Self
-	}
-}
-
-impl From<BadMessage> for DecodeError {
-	fn from(_: BadMessage) -> Self {
-		Self::BadMessage
-	}
-}
-
 impl core::fmt::Debug for DecodeError {
 	fn fmt(&self, fmtr: &mut core::fmt::Formatter) -> core::fmt::Result {
 		match self {
@@ -114,23 +88,16 @@ impl core::fmt::Display for DecodeError {
 	}
 }
 
-impl core::fmt::Display for BadMessage {
-	#[inline]
-	fn fmt(&self, fmtr: &mut core::fmt::Formatter) -> core::fmt::Result {
-		core::fmt::Debug::fmt(self, fmtr)
-	}
-}
-
 impl From<Header> for ClientMessage {
 	#[inline]
 	fn from(msg: Header) -> Self {
-		Self(cf_guest_upstream::proto::ClientMessage::from(msg.0))
+		Self { message: Some(client_message::Message::Header(msg)) }
 	}
 }
 
 impl From<Misbehaviour> for ClientMessage {
 	#[inline]
 	fn from(msg: Misbehaviour) -> Self {
-		Self(cf_guest_upstream::proto::ClientMessage::from(msg.0))
+		Self { message: Some(client_message::Message::Misbehaviour(msg)) }
 	}
 }
diff --git a/light-clients/icsxx-cf-solana/src/proto/cf-solana.proto b/light-clients/icsxx-cf-solana/src/proto/cf-solana.proto
new file mode 100644
index 000000000..e29148d06
--- /dev/null
+++ b/light-clients/icsxx-cf-solana/src/proto/cf-solana.proto
@@ -0,0 +1,205 @@
+syntax = "proto3";
+
+package lightclients.cf_solana.v1;
+
+// The consensus state of the Solana blockchain.
+message ConsensusState {
+	// 32-byte block hash.
+	bytes block_hash = 1;
+	// Timestamp in nanoseconds.  Never zero.
+	uint64 timestamp_ns = 2;
+
+	// NEXT ID: 3
+}
+
+// The client state of the light client for the Solana blockchain.
+message ClientState {
+	// Height of the latest finalised block.
+	uint64 latest_height = 1;
+
+	// Duration of the period since the last timestamp during which the
+	// submitted headers are valid for upgrade.
+	uint64 trusting_period_ns = 2;
+
+	// Whether client is frozen.
+	bool is_frozen = 3;
+
+	// Current leader of the blockchain.
+	bytes current_leader = 4;
+
+	// Genesis time of the blockchain, in nanoseconds.
+	uint64 genesis_time = 5;
+
+	// Duration of a slot in the blockchain, in nanoseconds.
+	uint64 slot_duration = 6;
+
+	// NEXT ID: 7
+}
+
+message ClientMessage {
+	oneof message {
+		Header header = 1;
+		Misbehaviour misbehaviour = 2;
+	}
+
+	// NEXT ID: 3
+}
+
+message Header {
+	repeated Shred shreds = 1;
+
+	// NEXT ID: 2
+}
+
+message Shred {
+	oneof message {
+		ShredCode shred_code = 1;
+		ShredData shred_data = 2;
+	}
+
+	// NEXT ID: 3
+}
+
+message ShredCode {
+	oneof message {
+		LegacyShredCode legacy_shred_code = 1;
+		MerkleShredCode merkle_shred_code = 2;
+	}
+
+	// NEXT ID: 3
+}
+
+message ShredData {
+	oneof message {
+		LegacyShredData legacy_shred_data = 1;
+		MerkleShredData merkle_shred_data = 2;
+	}
+
+	// NEXT ID: 3
+}
+
+// Protobuf definitions for ShredCommonHeader, CodingShredHeader, DataShredHeader, and other related messages.
+
+message ShredCommonHeader {
+	bytes signature = 1;
+	ShredVariant shred_variant = 2;
+	uint64 slot = 3;
+	uint32 index = 4;
+	uint32 version = 5;
+	uint32 fec_set_index = 6;
+
+	// NEXT ID: 7
+}
+
+message CodingShredHeader {
+	uint32 num_data_shreds = 1;
+	uint32 num_coding_shreds = 2;
+	uint32 position = 3; // [0..num_coding_shreds)
+
+	// NEXT ID: 4
+}
+
+message DataShredHeader {
+	uint32 parent_offset = 1;
+	ShredFlags flags = 2;
+	uint32 size = 3; // common shred header + data shred header + data
+
+	// NEXT ID: 4
+}
+
+message LegacyShredCode {
+	ShredCommonHeader common_header = 1;
+	CodingShredHeader coding_header = 2;
+	bytes payload = 3;
+
+	// NEXT ID: 4
+}
+
+message MerkleShredCode {
+	ShredCommonHeader common_header = 1;
+	CodingShredHeader coding_header = 2;
+	bytes payload = 3;
+
+	// NEXT ID: 4
+}
+
+message LegacyShredData {
+	ShredCommonHeader common_header = 1;
+	DataShredHeader data_header = 2;
+	bytes payload = 3;
+
+	// NEXT ID: 4
+}
+
+message MerkleShredData {
+	ShredCommonHeader common_header = 1;
+	DataShredHeader data_header = 2;
+	bytes payload = 3;
+
+	// NEXT ID: 4
+}
+
+// Shred flags, represented as a raw bitfield.
+message ShredFlags {
+	uint32 bits = 1;
+
+	// NEXT ID: 2
+}
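The proto carries `ShredFlags` as a raw integer even though only eight bits are meaningful on the Rust side, which is why the `ShredFlags::from_bits` conversion in header.rs can fail. For reference, the bit layout in upstream `solana-ledger` (assumed unchanged in the vendored copy) is:

```rust
use bitflags::bitflags;

bitflags! {
    // Layout as in solana-ledger's `ShredFlags`; the low six bits carry the
    // tick reference, the top two mark data-complete / last-in-slot.
    pub struct ShredFlags: u8 {
        const SHRED_TICK_REFERENCE_MASK = 0b0011_1111;
        const DATA_COMPLETE_SHRED       = 0b0100_0000;
        const LAST_SHRED_IN_SLOT        = 0b1100_0000;
    }
}
```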
+message ShredVariant {
+    oneof variant {
+        LegacyCode legacy_code = 1;
+        LegacyData legacy_data = 2;
+        MerkleCode merkle_code = 3;
+        MerkleData merkle_data = 4;
+    }
+
+    // NEXT ID: 5
+}
+
+message LegacyCode {
+    // No additional fields needed.
+}
+
+message LegacyData {
+    // No additional fields needed.
+}
+
+message MerkleCode {
+    uint32 proof_size = 1;
+    bool chained = 2;
+    bool resigned = 3;
+
+    // NEXT ID: 4
+}
+
+message MerkleData {
+    uint32 proof_size = 1;
+    bool chained = 2;
+    bool resigned = 3;
+
+    // NEXT ID: 4
+}
+
+// Define the ShredType enum.
+enum ShredType {
+    DATA = 0;
+    CODE = 1;
+}
+
+message Misbehaviour {
+    // First header.
+    Header header1 = 1;
+
+    // Second header.
+    //
+    // Note that the message may be partially filled. If `header1` and
+    // `header2` have the same `genesis_hash` or `epoch` (which is extremely
+    // likely), those fields can be omitted in the second header.
+    Header header2 = 2;
+
+    // NEXT ID: 3
+}
diff --git a/light-clients/icsxx-cf-solana/src/solana/blockstore.rs b/light-clients/icsxx-cf-solana/src/solana/blockstore.rs
new file mode 100644
index 000000000..55949120c
--- /dev/null
+++ b/light-clients/icsxx-cf-solana/src/solana/blockstore.rs
@@ -0,0 +1,78 @@
+//! File source: solana/ledger/src/blockstore.rs
+
+use crate::solana::{entry::Entry, shred::Shred, shredder::Shredder, Error};
+use alloc::{format, vec::Vec};
+use itertools::Itertools;
+use solana_sdk::clock::Slot;
+use std::collections::BTreeSet;
+
+// An upper bound on the maximum number of data shreds we can handle in a slot.
+// 32K shreds would allow ~320K peak TPS
+// (32K shreds per slot * 4 TX per shred * 2.5 slots per sec).
+pub const MAX_DATA_SHREDS_PER_SLOT: usize = 32_768;
+
+pub type CompletedRanges = Vec<(u32, u32)>;
+
+// Get the inclusive range [start_index, end_index] of every completed data
+// block.
+pub fn get_completed_data_ranges(
+    start_index: u32,
+    completed_data_indexes: &BTreeSet<u32>,
+    consumed: u32,
+) -> CompletedRanges {
+    // `consumed` is the next missing shred index, but shred `i` existing in
+    // completed_data_indexes implies it's not missing.
+    assert!(!completed_data_indexes.contains(&consumed));
+    completed_data_indexes
+        .range(start_index..consumed)
+        .scan(start_index, |begin, index| {
+            let out = (*begin, *index);
+            *begin = index + 1;
+            Some(out)
+        })
+        .collect()
+}
+
+/// Fetch the entries corresponding to all of the shred indices in
+/// `completed_ranges`. This function takes advantage of the fact that
+/// `completed_ranges` are both contiguous and in sorted order. To clarify,
+/// suppose completed_ranges is as follows:
+///
+///     completed_ranges = [..., (s_i, e_i), (s_{i+1}, e_{i+1}), ...]
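+///
+/// (Illustratively, completed_ranges = [(0, 2), (3, 5), (6, 9)] means shreds
+/// 0..=2 deshred into the first run of entries, shreds 3..=5 into the next,
+/// and so on; both bounds are inclusive.)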
+///
+/// Then the following statements are true:
+///
+///     s_i < e_i < s_{i+1} < e_{i+1}
+///     e_i == s_{i+1} - 1
+pub fn get_slot_entries_in_block(
+    _slot: Slot,
+    completed_ranges: CompletedRanges,
+    data_shreds: &[&Shred],
+) -> Result<Vec<Entry>, Error> {
+    assert!(!completed_ranges.is_empty());
+
+    let (all_ranges_start_index, _) = *completed_ranges.first().unwrap();
+
+    completed_ranges
+        .into_iter()
+        .map(|(start_index, end_index)| {
+            // The indices from completed_ranges refer to shred indices in the
+            // entire block; map those indices to indices within data_shreds.
+            let range_start_index = (start_index - all_ranges_start_index) as usize;
+            let range_end_index = (end_index - all_ranges_start_index) as usize;
+            let range_shreds = &data_shreds[range_start_index..=range_end_index];
+
+            let last_shred = range_shreds.last().unwrap();
+
+            assert!(last_shred.data_complete() || last_shred.last_in_slot());
+
+            Shredder::deshred(range_shreds)
+                .map_err(|e| {
+                    Error::Bincode(format!(
+                        "could not reconstruct entries buffer from shreds: {e:?}"
+                    ))
+                })
+                .and_then(|payload| {
+                    bincode::deserialize::<Vec<Entry>>(&payload).map_err(|e| {
+                        Error::Bincode(format!("could not reconstruct entries: {e:?}"))
+                    })
+                })
+        })
+        .flatten_ok()
+        .collect()
+}
diff --git a/light-clients/icsxx-cf-solana/src/solana/entry.rs b/light-clients/icsxx-cf-solana/src/solana/entry.rs
new file mode 100644
index 000000000..9796d1925
--- /dev/null
+++ b/light-clients/icsxx-cf-solana/src/solana/entry.rs
@@ -0,0 +1,46 @@
+//! File source: solana/entry/src/entry.rs
+
+use serde::{Deserialize, Serialize};
+use solana_sdk::{hash::Hash, transaction::VersionedTransaction};
+use std::prelude::rust_2015::Vec;
+
+/// Each Entry contains three pieces of data. The `num_hashes` field is the
+/// number of hashes performed since the previous entry. The `hash` field is
+/// the result of hashing `hash` from the previous entry `num_hashes` times.
+/// The `transactions` field points to Transactions that took place shortly
+/// before `hash` was generated.
+///
+/// If you multiply `num_hashes` by the amount of time it takes to generate a
+/// new hash, you get a duration estimate since the last `Entry`. Since
+/// processing power increases over time, one should expect the duration
+/// `num_hashes` represents to decrease proportionally. An upper bound on
+/// Duration can be estimated by assuming each hash was generated by the
+/// world's fastest processor at the time the entry was recorded. Or said
+/// another way, it is physically not possible for a shorter duration to have
+/// occurred if one assumes the hash was computed by the world's fastest
+/// processor at that time. The hash chain is both a Verifiable Delay Function
+/// (VDF) and a Proof of Work (not to be confused with Proof of Work
+/// consensus!).
+///
+/// The solana core protocol currently requires an `Entry` to contain
+/// `transactions` that are executable in parallel. Implemented in:
+///
+/// * For TPU: `solana_core::banking_stage::BankingStage::process_and_record_transactions()`
+/// * For TVU: `solana_core::replay_stage::ReplayStage::replay_blockstore_into_bank()`
+///
+/// All transactions in the `transactions` field have to follow the read/write
+/// locking restrictions with regard to the accounts they reference. A single
+/// account can be either written by a single transaction, or read by one or
+/// more transactions, but not both.
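+///
+/// (So, for example, two transactions that both write account `A` must land
+/// in different entries, while any number of transactions that only read `A`
+/// may share one.)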
+///
+/// This enforcement is done via a call to
+/// `solana_runtime::accounts::Accounts::lock_accounts()` with the `txs`
+/// argument holding all the `transactions` in the `Entry`.
+#[derive(Serialize, Deserialize, Debug, Default, PartialEq, Eq, Clone)]
+pub struct Entry {
+    /// The number of hashes since the previous Entry ID.
+    pub num_hashes: u64,
+
+    /// The SHA-256 hash `num_hashes` after the previous Entry ID.
+    pub hash: Hash,
+
+    /// An unordered list of transactions that were observed before the Entry
+    /// ID was generated. They may have been observed before a previous Entry
+    /// ID but were pushed back into this list to ensure deterministic
+    /// interpretation of the ledger.
+    pub transactions: Vec<VersionedTransaction>,
+}
diff --git a/light-clients/icsxx-cf-solana/src/solana/error.rs b/light-clients/icsxx-cf-solana/src/solana/error.rs
new file mode 100644
index 000000000..5d50a291b
--- /dev/null
+++ b/light-clients/icsxx-cf-solana/src/solana/error.rs
@@ -0,0 +1,37 @@
+use crate::solana::shred::ShredType;
+use core::fmt::{Display, Formatter};
+use solana_sdk::clock::Slot;
+use std::prelude::rust_2015::{String, ToString};
+
+#[derive(Clone, Debug)]
+pub enum Error {
+    InvalidShredVariant,
+    ShredSignatureVerificationFailed,
+    ShredMerkleRootCalculationFailed,
+    InvalidShredType,
+    InvalidMerkleProof,
+    InvalidPayloadSize(usize),
+    InvalidErasureShardIndex,
+    InvalidParentOffset { slot: Slot, parent_offset: u16 },
+    InvalidProofSize(u8),
+    InvalidShardSize(usize),
+    InvalidDataSize(u16),
+    InvalidShredIndex(ShredType, u32),
+    InvalidNumCodingShreds(u16),
+    InvalidShredFlags(u8),
+    TooFewDataShards,
+    Bincode(String), // `bincode::Error` doesn't implement `Clone`.
+}
+
+// TODO: implement proper Display formatting.
+impl Display for Error {
+    fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
+        write!(f, "{self:?}")
+    }
+}
+
+impl From<bincode::Error> for Error {
+    fn from(err: bincode::Error) -> Self {
+        Self::Bincode(err.to_string())
+    }
+}
diff --git a/light-clients/icsxx-cf-solana/src/solana/leader_schedule.rs b/light-clients/icsxx-cf-solana/src/solana/leader_schedule.rs
new file mode 100644
index 000000000..87536661c
--- /dev/null
+++ b/light-clients/icsxx-cf-solana/src/solana/leader_schedule.rs
@@ -0,0 +1,87 @@
+//! File source: solana/ledger/src/leader_schedule.rs
+
+use alloc::vec::Vec;
+use itertools::Itertools;
+use rand::{distributions::WeightedIndex, prelude::Distribution, SeedableRng};
+use rand_chacha::ChaChaRng;
+use solana_sdk::pubkey::Pubkey;
+use std::{collections::BTreeMap, convert::identity, ops::Index, sync::Arc};
+
+#[derive(Debug, Default, PartialEq, Eq, Clone)]
+pub struct LeaderSchedule {
+    slot_leaders: Vec<Pubkey>,
+    // Inverted index from pubkeys to indices where they are the leader.
+    index: BTreeMap<Pubkey, Arc<Vec<usize>>>,
+}
+
+impl LeaderSchedule {
+    // Note: passing in zero stakers will cause a panic.
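+    // (The panic comes from `WeightedIndex::new(stakes).unwrap()` below:
+    // `WeightedIndex` returns an error for an empty weight list and when the
+    // weights sum to zero, and the unwrap turns that into a panic.)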
+    pub fn new(ids_and_stakes: &[(Pubkey, u64)], seed: [u8; 32], len: u64, repeat: u64) -> Self {
+        let (ids, stakes): (Vec<_>, Vec<_>) = ids_and_stakes.iter().cloned().unzip();
+        let rng = &mut ChaChaRng::from_seed(seed);
+        let weighted_index = WeightedIndex::new(stakes).unwrap();
+        let mut current_node = Pubkey::default();
+        let slot_leaders = (0..len)
+            .map(|i| {
+                if i % repeat == 0 {
+                    current_node = ids[weighted_index.sample(rng)];
+                }
+                current_node
+            })
+            .collect();
+        Self::new_from_schedule(slot_leaders)
+    }
+
+    pub fn new_from_schedule(slot_leaders: Vec<Pubkey>) -> Self {
+        let index = slot_leaders
+            .iter()
+            .enumerate()
+            .map(|(i, pk)| (*pk, i))
+            .into_group_map()
+            .into_iter()
+            .map(|(k, v)| (k, Arc::new(v)))
+            .collect();
+        Self { slot_leaders, index }
+    }
+
+    pub fn get_slot_leaders(&self) -> &[Pubkey] {
+        &self.slot_leaders
+    }
+
+    pub fn num_slots(&self) -> usize {
+        self.slot_leaders.len()
+    }
+
+    /// 'offset' is an index into the leader schedule. The function returns an
+    /// iterator of indices i >= offset where the given pubkey is the leader.
+    pub(crate) fn get_indices(
+        &self,
+        pubkey: &Pubkey,
+        offset: usize, // Starting index.
+    ) -> impl Iterator<Item = usize> {
+        let index = self.index.get(pubkey).cloned().unwrap_or_default();
+        let num_slots = self.slot_leaders.len();
+        let size = index.len();
+        #[allow(clippy::reversed_empty_ranges)]
+        let range = if index.is_empty() {
+            1..=0 // Intentionally empty range of type RangeInclusive.
+        } else {
+            let offset = index.binary_search(&(offset % num_slots)).unwrap_or_else(identity) +
+                offset / num_slots * size;
+            offset..=usize::MAX
+        };
+        // The modular arithmetic here and above replicates the Index
+        // implementation for LeaderSchedule, where the schedule keeps
+        // repeating endlessly. The '%' returns where in a cycle we are and
+        // the '/' returns how many times the schedule is repeated.
+        range.map(move |k| index[k % size] + k / size * num_slots)
+    }
+}
+
+impl Index<u64> for LeaderSchedule {
+    type Output = Pubkey;
+    fn index(&self, index: u64) -> &Pubkey {
+        let index = index as usize;
+        &self.slot_leaders[index % self.slot_leaders.len()]
+    }
+}
diff --git a/light-clients/icsxx-cf-solana/src/solana/mod.rs b/light-clients/icsxx-cf-solana/src/solana/mod.rs
new file mode 100644
index 000000000..b3c317826
--- /dev/null
+++ b/light-clients/icsxx-cf-solana/src/solana/mod.rs
@@ -0,0 +1,11 @@
+#![allow(dead_code)]
+
+pub mod blockstore;
+pub mod entry;
+mod error;
+mod leader_schedule;
+pub mod packet;
+pub mod shred;
+pub mod shredder;
+
+pub use error::Error;
diff --git a/light-clients/icsxx-cf-solana/src/solana/packet.rs b/light-clients/icsxx-cf-solana/src/solana/packet.rs
new file mode 100644
index 000000000..00cb3d523
--- /dev/null
+++ b/light-clients/icsxx-cf-solana/src/solana/packet.rs
@@ -0,0 +1,20 @@
+//! File source: solana/perf/src/packet.rs
+
+use bincode::Options;
+use serde::de::DeserializeOwned;
+use solana_sdk::packet::PACKET_DATA_SIZE;
+use std::io::Read;
+
+pub fn deserialize_from_with_limit<R, T>(reader: R) -> bincode::Result<T>
+where
+    R: Read,
+    T: DeserializeOwned,
+{
+    // with_limit causes the pre-allocation size to be limited, to protect
+    // against memory exhaustion attacks.
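+    //
+    // Illustrative sketch (not part of the upstream sources): callers in the
+    // shred modules wrap a payload in a cursor and decode a header, e.g.
+    //
+    //     let mut cursor = std::io::Cursor::new(&payload[..]);
+    //     let header: ShredCommonHeader = deserialize_from_with_limit(&mut cursor)?;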
+    bincode::options()
+        .with_limit(PACKET_DATA_SIZE as u64)
+        .with_fixint_encoding()
+        .allow_trailing_bytes()
+        .deserialize_from(reader)
+}
diff --git a/light-clients/icsxx-cf-solana/src/solana/shred/common.rs b/light-clients/icsxx-cf-solana/src/solana/shred/common.rs
new file mode 100644
index 000000000..bc7c16680
--- /dev/null
+++ b/light-clients/icsxx-cf-solana/src/solana/shred/common.rs
@@ -0,0 +1,83 @@
+//! File source: solana/ledger/src/shred/common.rs
+
+// Dispatches a method call to the Legacy or Merkle variant of the enclosing
+// enum.
+macro_rules! dispatch {
+    ($vis:vis fn $name:ident(&self $(, $arg:ident : $ty:ty)?) $(-> $out:ty)?) => {
+        #[inline]
+        $vis fn $name(&self $(, $arg:$ty)?) $(-> $out)? {
+            match self {
+                Self::Legacy(shred) => shred.$name($($arg, )?),
+                Self::Merkle(shred) => shred.$name($($arg, )?),
+            }
+        }
+    };
+    ($vis:vis fn $name:ident(self $(, $arg:ident : $ty:ty)?) $(-> $out:ty)?) => {
+        #[inline]
+        $vis fn $name(self $(, $arg:$ty)?) $(-> $out)? {
+            match self {
+                Self::Legacy(shred) => shred.$name($($arg, )?),
+                Self::Merkle(shred) => shred.$name($($arg, )?),
+            }
+        }
+    };
+    ($vis:vis fn $name:ident(&mut self $(, $arg:ident : $ty:ty)?) $(-> $out:ty)?) => {
+        #[inline]
+        $vis fn $name(&mut self $(, $arg:$ty)?) $(-> $out)? {
+            match self {
+                Self::Legacy(shred) => shred.$name($($arg, )?),
+                Self::Merkle(shred) => shred.$name($($arg, )?),
+            }
+        }
+    }
+}
+
+macro_rules! impl_shred_common {
+    () => {
+        #[inline]
+        fn common_header(&self) -> &ShredCommonHeader {
+            &self.common_header
+        }
+
+        #[inline]
+        fn payload(&self) -> &Vec<u8> {
+            &self.payload
+        }
+
+        fn into_payload(self) -> Vec<u8> {
+            self.payload
+        }
+
+        fn set_signature(&mut self, signature: Signature) {
+            bincode::serialize_into(&mut self.payload[..], &signature).unwrap();
+            self.common_header.signature = signature;
+        }
+
+        // Only for tests.
+        fn set_index(&mut self, index: u32) {
+            match self.common_header.shred_variant {
+                ShredVariant::LegacyCode | ShredVariant::LegacyData => {
+                    self.common_header.index = index;
+                    bincode::serialize_into(&mut self.payload[..], &self.common_header).unwrap();
+                },
+                ShredVariant::MerkleCode { .. } | ShredVariant::MerkleData { .. } => {
+                    panic!("Not Implemented!");
+                },
+            }
+        }
+
+        // Only for tests.
+        fn set_slot(&mut self, slot: Slot) {
+            match self.common_header.shred_variant {
+                ShredVariant::LegacyCode | ShredVariant::LegacyData => {
+                    self.common_header.slot = slot;
+                    bincode::serialize_into(&mut self.payload[..], &self.common_header).unwrap();
+                },
+                ShredVariant::MerkleCode { .. } | ShredVariant::MerkleData { .. } => {
+                    panic!("Not Implemented!");
+                },
+            }
+        }
+    };
+}
+
+pub(super) use dispatch;
+pub(super) use impl_shred_common;
diff --git a/light-clients/icsxx-cf-solana/src/solana/shred/legacy.rs b/light-clients/icsxx-cf-solana/src/solana/shred/legacy.rs
new file mode 100644
index 000000000..774b5e39f
--- /dev/null
+++ b/light-clients/icsxx-cf-solana/src/solana/shred/legacy.rs
@@ -0,0 +1,263 @@
+//! 
File source: solana/ledger/src/shred/legacy.rs + +use serde::{Deserialize, Serialize}; + +use crate::solana::{ + packet::deserialize_from_with_limit, + shred::{ + common::impl_shred_common, + shred_code, shred_data, + traits::{Shred, ShredCode as ShredCodeTrait, ShredData as ShredDataTrait}, + CodingShredHeader, DataShredHeader, ShredCommonHeader, ShredFlags, ShredVariant, + SIZE_OF_CODING_SHRED_HEADERS, SIZE_OF_COMMON_SHRED_HEADER, SIZE_OF_DATA_SHRED_HEADERS, + SIZE_OF_SIGNATURE, + }, + Error, +}; +use alloc::vec::Vec; +use solana_sdk::{clock::Slot, signature::Signature}; +use static_assertions::const_assert_eq; +use std::{io::Cursor, ops::Range}; + +// All payload including any zero paddings are signed. +// Code and data shreds have the same payload size. +pub(super) const SIGNED_MESSAGE_OFFSETS: Range = + SIZE_OF_SIGNATURE..ShredData::SIZE_OF_PAYLOAD; +const_assert_eq!(ShredData::SIZE_OF_PAYLOAD, ShredCode::SIZE_OF_PAYLOAD); +const_assert_eq!(ShredData::SIZE_OF_PAYLOAD, 1228); +const_assert_eq!(ShredData::CAPACITY, 1051); + +// ShredCode::SIZE_OF_HEADERS bytes at the end of data shreds +// is never used and is not part of erasure coding. +const_assert_eq!(SIZE_OF_ERASURE_ENCODED_SLICE, 1139); +pub(super) const SIZE_OF_ERASURE_ENCODED_SLICE: usize = + ShredCode::SIZE_OF_PAYLOAD - ShredCode::SIZE_OF_HEADERS; + +// Layout: {common, data} headers | data | zero padding +// Everything up to ShredCode::SIZE_OF_HEADERS bytes at the end (which is part +// of zero padding) is erasure coded. +// All payload past signature, including the entirety of zero paddings, is +// signed. +#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize)] +pub struct ShredData { + pub(crate) common_header: ShredCommonHeader, + pub(crate) data_header: DataShredHeader, + pub(crate) payload: Vec, +} + +// Layout: {common, coding} headers | erasure coded shard +// All payload past signature is singed. +#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize)] +pub struct ShredCode { + pub(crate) common_header: ShredCommonHeader, + pub(crate) coding_header: CodingShredHeader, + pub(crate) payload: Vec, +} + +impl<'a> Shred<'a> for ShredData { + type SignedData = &'a [u8]; + + impl_shred_common!(); + // Legacy data shreds are always zero padded and + // the same size as coding shreds. + const SIZE_OF_PAYLOAD: usize = shred_code::ShredCode::SIZE_OF_PAYLOAD; + const SIZE_OF_HEADERS: usize = SIZE_OF_DATA_SHRED_HEADERS; + + fn from_payload(mut payload: Vec) -> Result { + let mut cursor = Cursor::new(&payload[..]); + let common_header: ShredCommonHeader = deserialize_from_with_limit(&mut cursor)?; + if common_header.shred_variant != ShredVariant::LegacyData { + return Err(Error::InvalidShredVariant); + } + let data_header = deserialize_from_with_limit(&mut cursor)?; + // Shreds stored to blockstore may have trailing zeros trimmed. 
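+        // (Hence the `payload.resize(SIZE_OF_PAYLOAD, 0)` further below: a
+        // stored legacy data shred may arrive shorter than SIZE_OF_PAYLOAD,
+        // and the zero padding must be reinstated because the signed message
+        // covers the fully padded payload.)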
+ // Repair packets have nonce at the end of packet payload; see: + // https://github.com/solana-labs/solana/pull/10109 + // https://github.com/solana-labs/solana/pull/16602 + if payload.len() < Self::SIZE_OF_HEADERS { + return Err(Error::InvalidPayloadSize(payload.len())); + } + payload.resize(Self::SIZE_OF_PAYLOAD, 0u8); + let shred = Self { common_header, data_header, payload }; + shred.sanitize().map(|_| shred) + } + + fn erasure_shard_index(&self) -> Result { + shred_data::erasure_shard_index(self).ok_or_else(|| Error::InvalidErasureShardIndex) + } + + fn erasure_shard(self) -> Result, Error> { + if self.payload.len() != Self::SIZE_OF_PAYLOAD { + return Err(Error::InvalidPayloadSize(self.payload.len())); + } + let mut shard = self.payload; + shard.truncate(SIZE_OF_ERASURE_ENCODED_SLICE); + Ok(shard) + } + + fn erasure_shard_as_slice(&self) -> Result<&[u8], Error> { + if self.payload.len() != Self::SIZE_OF_PAYLOAD { + return Err(Error::InvalidPayloadSize(self.payload.len())); + } + Ok(&self.payload[..SIZE_OF_ERASURE_ENCODED_SLICE]) + } + + fn sanitize(&self) -> Result<(), Error> { + match self.common_header.shred_variant { + ShredVariant::LegacyData => (), + _ => return Err(Error::InvalidShredVariant), + } + shred_data::sanitize(self) + } + + fn signed_data(&'a self) -> Result { + debug_assert_eq!(self.payload.len(), Self::SIZE_OF_PAYLOAD); + Ok(&self.payload[SIZE_OF_SIGNATURE..]) + } +} + +impl<'a> Shred<'a> for ShredCode { + type SignedData = &'a [u8]; + + impl_shred_common!(); + const SIZE_OF_PAYLOAD: usize = shred_code::ShredCode::SIZE_OF_PAYLOAD; + const SIZE_OF_HEADERS: usize = SIZE_OF_CODING_SHRED_HEADERS; + + fn from_payload(mut payload: Vec) -> Result { + let mut cursor = Cursor::new(&payload[..]); + let common_header: ShredCommonHeader = deserialize_from_with_limit(&mut cursor)?; + if common_header.shred_variant != ShredVariant::LegacyCode { + return Err(Error::InvalidShredVariant); + } + let coding_header = deserialize_from_with_limit(&mut cursor)?; + // Repair packets have nonce at the end of packet payload: + // https://github.com/solana-labs/solana/pull/10109 + payload.truncate(Self::SIZE_OF_PAYLOAD); + let shred = Self { common_header, coding_header, payload }; + shred.sanitize().map(|_| shred) + } + + fn erasure_shard_index(&self) -> Result { + shred_code::erasure_shard_index(self).ok_or_else(|| Error::InvalidErasureShardIndex) + } + + fn erasure_shard(self) -> Result, Error> { + if self.payload.len() != Self::SIZE_OF_PAYLOAD { + return Err(Error::InvalidPayloadSize(self.payload.len())); + } + let mut shard = self.payload; + // ShredCode::SIZE_OF_HEADERS bytes at the beginning of the coding + // shreds contains the header and is not part of erasure coding. 
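+        // (So the erasure shard of a legacy coding shred is exactly
+        // payload[SIZE_OF_HEADERS..], matching `erasure_shard_as_slice`
+        // below.)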
+ shard.drain(..Self::SIZE_OF_HEADERS); + Ok(shard) + } + + fn erasure_shard_as_slice(&self) -> Result<&[u8], Error> { + if self.payload.len() != Self::SIZE_OF_PAYLOAD { + return Err(Error::InvalidPayloadSize(self.payload.len())); + } + Ok(&self.payload[Self::SIZE_OF_HEADERS..]) + } + + fn sanitize(&self) -> Result<(), Error> { + match self.common_header.shred_variant { + ShredVariant::LegacyCode => (), + _ => return Err(Error::InvalidShredVariant), + } + shred_code::sanitize(self) + } + + fn signed_data(&'a self) -> Result { + debug_assert_eq!(self.payload.len(), Self::SIZE_OF_PAYLOAD); + Ok(&self.payload[SIZE_OF_SIGNATURE..]) + } +} + +impl ShredDataTrait for ShredData { + #[inline] + fn data_header(&self) -> &DataShredHeader { + &self.data_header + } + + fn data(&self) -> Result<&[u8], Error> { + let size = usize::from(self.data_header.size); + #[allow(clippy::manual_range_contains)] + if size > self.payload.len() || + size < Self::SIZE_OF_HEADERS || + size > Self::SIZE_OF_HEADERS + Self::CAPACITY + { + return Err(Error::InvalidDataSize(self.data_header.size)); + } + Ok(&self.payload[Self::SIZE_OF_HEADERS..size]) + } +} + +impl ShredCodeTrait for ShredCode { + #[inline] + fn coding_header(&self) -> &CodingShredHeader { + &self.coding_header + } +} + +impl ShredData { + // Maximum size of ledger data that can be embedded in a data-shred. + pub(super) const CAPACITY: usize = + Self::SIZE_OF_PAYLOAD - Self::SIZE_OF_HEADERS - ShredCode::SIZE_OF_HEADERS; + + pub(super) fn new_from_data( + slot: Slot, + index: u32, + parent_offset: u16, + data: &[u8], + flags: ShredFlags, + reference_tick: u8, + version: u16, + fec_set_index: u32, + ) -> Self { + let mut payload = alloc::vec![0; Self::SIZE_OF_PAYLOAD]; + let common_header = ShredCommonHeader { + signature: Signature::default(), + shred_variant: ShredVariant::LegacyData, + slot, + index, + version, + fec_set_index, + }; + let size = (data.len() + Self::SIZE_OF_HEADERS) as u16; + let flags = flags | + ShredFlags::from_bits_retain( + ShredFlags::SHRED_TICK_REFERENCE_MASK.bits().min(reference_tick), + ); + let data_header = DataShredHeader { parent_offset, flags, size }; + let mut cursor = Cursor::new(&mut payload[..]); + bincode::serialize_into(&mut cursor, &common_header).unwrap(); + bincode::serialize_into(&mut cursor, &data_header).unwrap(); + // TODO: Need to check if data is too large! + let offset = cursor.position() as usize; + debug_assert_eq!(offset, Self::SIZE_OF_HEADERS); + payload[offset..offset + data.len()].copy_from_slice(data); + Self { common_header, data_header, payload } + } + + pub(super) fn bytes_to_store(&self) -> &[u8] { + // Payload will be padded out to Self::SIZE_OF_PAYLOAD. + // But only need to store the bytes within data_header.size. + &self.payload[..self.data_header.size as usize] + } + + pub(super) fn resize_stored_shred(mut shred: Vec) -> Result, Error> { + // Old shreds might have been extra zero padded. + if !(Self::SIZE_OF_HEADERS..=Self::SIZE_OF_PAYLOAD).contains(&shred.len()) { + return Err(Error::InvalidPayloadSize(shred.len())); + } + shred.resize(Self::SIZE_OF_PAYLOAD, 0u8); + Ok(shred) + } + + // Only for tests. 
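+    // (ShredFlags::LAST_SHRED_IN_SLOT is 0b1100_0000, so or-ing it in below
+    // also sets DATA_COMPLETE_SHRED (0b0100_0000): "last in slot" implies
+    // "data complete" by construction.)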
+ pub(crate) fn set_last_in_slot(&mut self) { + self.data_header.flags |= ShredFlags::LAST_SHRED_IN_SLOT; + let buffer = &mut self.payload[SIZE_OF_COMMON_SHRED_HEADER..]; + bincode::serialize_into(buffer, &self.data_header).unwrap(); + } +} diff --git a/light-clients/icsxx-cf-solana/src/solana/shred/merkle.rs b/light-clients/icsxx-cf-solana/src/solana/shred/merkle.rs new file mode 100644 index 000000000..ce24f8322 --- /dev/null +++ b/light-clients/icsxx-cf-solana/src/solana/shred/merkle.rs @@ -0,0 +1,714 @@ +//! File source: solana/ledger/src/shred/merkle.rs + +use serde::{Deserialize, Serialize}; + +#[cfg(test)] +use crate::solana::shred::ShredType; +use crate::solana::{ + packet::deserialize_from_with_limit, + shred::{ + self, + common::impl_shred_common, + dispatch, shred_code, shred_data, + traits::{Shred as ShredTrait, ShredCode as ShredCodeTrait, ShredData as ShredDataTrait}, + CodingShredHeader, DataShredHeader, ShredCommonHeader, ShredVariant, + SIZE_OF_CODING_SHRED_HEADERS, SIZE_OF_DATA_SHRED_HEADERS, SIZE_OF_SIGNATURE, + }, + Error, +}; +use alloc::vec::Vec; +use itertools::Either; +use solana_sdk::{ + clock::Slot, + hash::{hashv, Hash}, + pubkey::Pubkey, + signature::Signature, +}; +use static_assertions::const_assert_eq; +use std::{io::Cursor, ops::Range}; + +const_assert_eq!(SIZE_OF_MERKLE_ROOT, 32); +pub(crate) const SIZE_OF_MERKLE_ROOT: usize = std::mem::size_of::(); +const_assert_eq!(SIZE_OF_MERKLE_PROOF_ENTRY, 20); +pub const SIZE_OF_MERKLE_PROOF_ENTRY: usize = std::mem::size_of::(); +const_assert_eq!(ShredData::SIZE_OF_PAYLOAD, 1203); + +// Defense against second preimage attack: +// https://en.wikipedia.org/wiki/Merkle_tree#Second_preimage_attack +// Following Certificate Transparency, 0x00 and 0x01 bytes are prepended to +// hash data when computing leaf and internal node hashes respectively. +const MERKLE_HASH_PREFIX_LEAF: &[u8] = b"\x00SOLANA_MERKLE_SHREDS_LEAF"; +const MERKLE_HASH_PREFIX_NODE: &[u8] = b"\x01SOLANA_MERKLE_SHREDS_NODE"; + +type MerkleProofEntry = [u8; 20]; + +// Layout: {common, data} headers | data buffer +// | [Merkle root of the previous erasure batch if chained] +// | Merkle proof +// | [Retransmitter's signature if resigned] +// The slice past signature till the end of the data buffer is erasure coded. +// The slice past signature and before the merkle proof is hashed to generate +// the Merkle tree. The root of the Merkle tree is signed. +#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize)] +pub struct ShredData { + pub(crate) common_header: ShredCommonHeader, + pub(crate) data_header: DataShredHeader, + pub(crate) payload: Vec, +} + +// Layout: {common, coding} headers | erasure coded shard +// | [Merkle root of the previous erasure batch if chained] +// | Merkle proof +// | [Retransmitter's signature if resigned] +// The slice past signature and before the merkle proof is hashed to generate +// the Merkle tree. The root of the Merkle tree is signed. 
+#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize)] +pub struct ShredCode { + pub(crate) common_header: ShredCommonHeader, + pub(crate) coding_header: CodingShredHeader, + pub(crate) payload: Vec, +} + +#[derive(Clone, Debug, Eq, PartialEq)] +pub(super) enum Shred { + ShredCode(ShredCode), + ShredData(ShredData), +} + +impl Shred { + dispatch!(fn common_header(&self) -> &ShredCommonHeader); + dispatch!(fn erasure_shard_as_slice(&self) -> Result<&[u8], Error>); + dispatch!(fn erasure_shard_index(&self) -> Result); + dispatch!(fn merkle_node(&self) -> Result); + dispatch!(fn payload(&self) -> &Vec); + dispatch!(fn sanitize(&self) -> Result<(), Error>); + dispatch!(fn set_merkle_proof(&mut self, proof: &[&MerkleProofEntry]) -> Result<(), Error>); + dispatch!(fn set_signature(&mut self, signature: Signature)); + dispatch!(fn signed_data(&self) -> Result); + + fn merkle_proof(&self) -> Result, Error> { + match self { + Self::ShredCode(shred) => shred.merkle_proof().map(Either::Left), + Self::ShredData(shred) => shred.merkle_proof().map(Either::Right), + } + } + + #[must_use] + fn verify(&self, pubkey: &Pubkey) -> bool { + match self.signed_data() { + Ok(data) => self.signature().verify(pubkey.as_ref(), data.as_ref()), + Err(_) => false, + } + } + + fn signature(&self) -> &Signature { + &self.common_header().signature + } + + fn from_payload(shred: Vec) -> Result { + match shred::layout::get_shred_variant(&shred)? { + ShredVariant::LegacyCode | ShredVariant::LegacyData => Err(Error::InvalidShredVariant), + ShredVariant::MerkleCode { .. } => Ok(Self::ShredCode(ShredCode::from_payload(shred)?)), + ShredVariant::MerkleData { .. } => Ok(Self::ShredData(ShredData::from_payload(shred)?)), + } + } +} + +#[cfg(test)] +impl Shred { + dispatch!(fn merkle_root(&self) -> Result); + dispatch!(fn proof_size(&self) -> Result); + + fn index(&self) -> u32 { + self.common_header().index + } + + fn shred_type(&self) -> ShredType { + ShredType::from(self.common_header().shred_variant) + } +} + +impl ShredData { + // proof_size is the number of merkle proof entries. + pub(crate) fn proof_size(&self) -> Result { + match self.common_header.shred_variant { + ShredVariant::MerkleData { proof_size, .. } => Ok(proof_size), + _ => Err(Error::InvalidShredVariant), + } + } + + // Maximum size of ledger data that can be embedded in a data-shred. + // Also equal to: + // ShredCode::capacity(proof_size, chained, resigned).unwrap() + // - ShredData::SIZE_OF_HEADERS + // + SIZE_OF_SIGNATURE + pub(super) fn capacity(proof_size: u8, chained: bool, resigned: bool) -> Result { + debug_assert!(chained || !resigned); + Self::SIZE_OF_PAYLOAD + .checked_sub( + Self::SIZE_OF_HEADERS + + if chained { SIZE_OF_MERKLE_ROOT } else { 0 } + + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY + + if resigned { SIZE_OF_SIGNATURE } else { 0 }, + ) + .ok_or(Error::InvalidProofSize(proof_size)) + } + + // Where the merkle proof starts in the shred binary. + pub(crate) fn proof_offset(&self) -> Result { + let ShredVariant::MerkleData { proof_size, chained, resigned } = + self.common_header.shred_variant + else { + return Err(Error::InvalidShredVariant); + }; + Self::get_proof_offset(proof_size, chained, resigned) + } + + fn get_proof_offset(proof_size: u8, chained: bool, resigned: bool) -> Result { + Ok(Self::SIZE_OF_HEADERS + + Self::capacity(proof_size, chained, resigned)? 
+ + if chained { SIZE_OF_MERKLE_ROOT } else { 0 }) + } + + fn chained_merkle_root_offset(&self) -> Result { + let ShredVariant::MerkleData { proof_size, chained: true, resigned } = + self.common_header.shred_variant + else { + return Err(Error::InvalidShredVariant); + }; + Ok(Self::SIZE_OF_HEADERS + Self::capacity(proof_size, /* chained: */ true, resigned)?) + } + + fn set_chained_merkle_root(&mut self, chained_merkle_root: &Hash) -> Result<(), Error> { + let offset = self.chained_merkle_root_offset()?; + let Some(buffer) = self.payload.get_mut(offset..offset + SIZE_OF_MERKLE_ROOT) else { + return Err(Error::InvalidPayloadSize(self.payload.len())); + }; + buffer.copy_from_slice(chained_merkle_root.as_ref()); + Ok(()) + } + + pub(super) fn merkle_root(&self) -> Result { + let proof_size = self.proof_size()?; + let index = self.erasure_shard_index()?; + let proof_offset = self.proof_offset()?; + let proof = get_merkle_proof(&self.payload, proof_offset, proof_size)?; + let node = get_merkle_node(&self.payload, SIZE_OF_SIGNATURE..proof_offset)?; + get_merkle_root(index, node, proof) + } + + pub(crate) fn merkle_proof(&self) -> Result, Error> { + let proof_size = self.proof_size()?; + let proof_offset = self.proof_offset()?; + get_merkle_proof(&self.payload, proof_offset, proof_size) + } + + pub(crate) fn merkle_node(&self) -> Result { + let proof_offset = self.proof_offset()?; + get_merkle_node(&self.payload, SIZE_OF_SIGNATURE..proof_offset) + } + + fn from_recovered_shard( + signature: &Signature, + chained_merkle_root: &Option, + mut shard: Vec, + ) -> Result { + let shard_size = shard.len(); + if shard_size + SIZE_OF_SIGNATURE > Self::SIZE_OF_PAYLOAD { + return Err(Error::InvalidShardSize(shard_size)); + } + shard.resize(Self::SIZE_OF_PAYLOAD, 0u8); + shard.copy_within(0..shard_size, SIZE_OF_SIGNATURE); + shard[0..SIZE_OF_SIGNATURE].copy_from_slice(signature.as_ref()); + // Deserialize headers. + let mut cursor = Cursor::new(&shard[..]); + let common_header: ShredCommonHeader = deserialize_from_with_limit(&mut cursor)?; + let ShredVariant::MerkleData { proof_size, chained, resigned } = + common_header.shred_variant + else { + return Err(Error::InvalidShredVariant); + }; + if ShredCode::capacity(proof_size, chained, resigned)? != shard_size { + return Err(Error::InvalidShardSize(shard_size)); + } + let data_header = deserialize_from_with_limit(&mut cursor)?; + let mut shred = Self { common_header, data_header, payload: shard }; + if let Some(chained_merkle_root) = chained_merkle_root { + shred.set_chained_merkle_root(chained_merkle_root)?; + } + shred.sanitize()?; + Ok(shred) + } + + fn set_merkle_proof(&mut self, proof: &[&MerkleProofEntry]) -> Result<(), Error> { + let proof_size = self.proof_size()?; + if proof.len() != usize::from(proof_size) { + return Err(Error::InvalidMerkleProof); + } + let proof_offset = self.proof_offset()?; + let mut cursor = Cursor::new( + self.payload + .get_mut(proof_offset..) + .ok_or(Error::InvalidProofSize(proof_size))?, + ); + for entry in proof { + bincode::serialize_into(&mut cursor, entry)?; + } + Ok(()) + } + + pub(super) fn get_merkle_root( + shred: &[u8], + proof_size: u8, + chained: bool, + resigned: bool, + ) -> Option { + debug_assert_eq!( + shred::layout::get_shred_variant(shred).unwrap(), + ShredVariant::MerkleData { proof_size, chained, resigned }, + ); + // Shred index in the erasure batch. + let index = { + let fec_set_index = + <[u8; 4]>::try_from(shred.get(79..83)?).map(u32::from_le_bytes).ok()?; + shred::layout::get_index(shred)? 
+ .checked_sub(fec_set_index) + .map(usize::try_from)? + .ok()? + }; + let proof_offset = Self::get_proof_offset(proof_size, chained, resigned).ok()?; + let proof = get_merkle_proof(shred, proof_offset, proof_size).ok()?; + let node = get_merkle_node(shred, SIZE_OF_SIGNATURE..proof_offset).ok()?; + get_merkle_root(index, node, proof).ok() + } +} + +impl ShredCode { + // proof_size is the number of merkle proof entries. + pub(crate) fn proof_size(&self) -> Result { + match self.common_header.shred_variant { + ShredVariant::MerkleCode { proof_size, .. } => Ok(proof_size), + _ => Err(Error::InvalidShredVariant), + } + } + + // Size of buffer embedding erasure codes. + fn capacity(proof_size: u8, chained: bool, resigned: bool) -> Result { + debug_assert!(chained || !resigned); + // Merkle proof is generated and signed after coding shreds are + // generated. Coding shred headers cannot be erasure coded either. + Self::SIZE_OF_PAYLOAD + .checked_sub( + Self::SIZE_OF_HEADERS + + if chained { SIZE_OF_MERKLE_ROOT } else { 0 } + + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY + + if resigned { SIZE_OF_SIGNATURE } else { 0 }, + ) + .ok_or(Error::InvalidProofSize(proof_size)) + } + + // Where the merkle proof starts in the shred binary. + pub(crate) fn proof_offset(&self) -> Result { + let ShredVariant::MerkleCode { proof_size, chained, resigned } = + self.common_header.shred_variant + else { + return Err(Error::InvalidShredVariant); + }; + Self::get_proof_offset(proof_size, chained, resigned) + } + + fn get_proof_offset(proof_size: u8, chained: bool, resigned: bool) -> Result { + Ok(Self::SIZE_OF_HEADERS + + Self::capacity(proof_size, chained, resigned)? + + if chained { SIZE_OF_MERKLE_ROOT } else { 0 }) + } + + fn chained_merkle_root_offset(&self) -> Result { + let ShredVariant::MerkleCode { proof_size, chained: true, resigned } = + self.common_header.shred_variant + else { + return Err(Error::InvalidShredVariant); + }; + Ok(Self::SIZE_OF_HEADERS + Self::capacity(proof_size, /* chained: */ true, resigned)?) 
+ } + + fn chained_merkle_root(&self) -> Result { + let offset = self.chained_merkle_root_offset()?; + self.payload + .get(offset..offset + SIZE_OF_MERKLE_ROOT) + .map(Hash::new) + .ok_or(Error::InvalidPayloadSize(self.payload.len())) + } + + fn set_chained_merkle_root(&mut self, chained_merkle_root: &Hash) -> Result<(), Error> { + let offset = self.chained_merkle_root_offset()?; + let Some(buffer) = self.payload.get_mut(offset..offset + SIZE_OF_MERKLE_ROOT) else { + return Err(Error::InvalidPayloadSize(self.payload.len())); + }; + buffer.copy_from_slice(chained_merkle_root.as_ref()); + Ok(()) + } + + pub(super) fn merkle_root(&self) -> Result { + let proof_size = self.proof_size()?; + let index = self.erasure_shard_index()?; + let proof_offset = self.proof_offset()?; + let proof = get_merkle_proof(&self.payload, proof_offset, proof_size)?; + let node = get_merkle_node(&self.payload, SIZE_OF_SIGNATURE..proof_offset)?; + get_merkle_root(index, node, proof) + } + + pub(crate) fn merkle_proof(&self) -> Result, Error> { + let proof_size = self.proof_size()?; + let proof_offset = self.proof_offset()?; + get_merkle_proof(&self.payload, proof_offset, proof_size) + } + + pub(crate) fn merkle_node(&self) -> Result { + let proof_offset = self.proof_offset()?; + get_merkle_node(&self.payload, SIZE_OF_SIGNATURE..proof_offset) + } + + fn from_recovered_shard( + common_header: ShredCommonHeader, + coding_header: CodingShredHeader, + chained_merkle_root: &Option, + mut shard: Vec, + ) -> Result { + let ShredVariant::MerkleCode { proof_size, chained, resigned } = + common_header.shred_variant + else { + return Err(Error::InvalidShredVariant); + }; + let shard_size = shard.len(); + if Self::capacity(proof_size, chained, resigned)? != shard_size { + return Err(Error::InvalidShardSize(shard_size)); + } + if shard_size + Self::SIZE_OF_HEADERS > Self::SIZE_OF_PAYLOAD { + return Err(Error::InvalidShardSize(shard_size)); + } + shard.resize(Self::SIZE_OF_PAYLOAD, 0u8); + shard.copy_within(0..shard_size, Self::SIZE_OF_HEADERS); + let mut cursor = Cursor::new(&mut shard[..]); + bincode::serialize_into(&mut cursor, &common_header)?; + bincode::serialize_into(&mut cursor, &coding_header)?; + let mut shred = Self { common_header, coding_header, payload: shard }; + if let Some(chained_merkle_root) = chained_merkle_root { + shred.set_chained_merkle_root(chained_merkle_root)?; + } + shred.sanitize()?; + Ok(shred) + } + + fn set_merkle_proof(&mut self, proof: &[&MerkleProofEntry]) -> Result<(), Error> { + let proof_size = self.proof_size()?; + if proof.len() != usize::from(proof_size) { + return Err(Error::InvalidMerkleProof); + } + let proof_offset = self.proof_offset()?; + let mut cursor = Cursor::new( + self.payload + .get_mut(proof_offset..) + .ok_or(Error::InvalidProofSize(proof_size))?, + ); + for entry in proof { + bincode::serialize_into(&mut cursor, entry)?; + } + Ok(()) + } + + pub(super) fn get_merkle_root( + shred: &[u8], + proof_size: u8, + chained: bool, + resigned: bool, + ) -> Option { + debug_assert_eq!( + shred::layout::get_shred_variant(shred).unwrap(), + ShredVariant::MerkleCode { proof_size, chained, resigned }, + ); + // Shred index in the erasure batch. + let index = { + let num_data_shreds = <[u8; 2]>::try_from(shred.get(83..85)?) + .map(u16::from_le_bytes) + .map(usize::from) + .ok()?; + let position = <[u8; 2]>::try_from(shred.get(87..89)?) + .map(u16::from_le_bytes) + .map(usize::from) + .ok()?; + num_data_shreds.checked_add(position)? 
+ }; + let proof_offset = Self::get_proof_offset(proof_size, chained, resigned).ok()?; + let proof = get_merkle_proof(shred, proof_offset, proof_size).ok()?; + let node = get_merkle_node(shred, SIZE_OF_SIGNATURE..proof_offset).ok()?; + get_merkle_root(index, node, proof).ok() + } +} + +impl<'a> ShredTrait<'a> for ShredData { + type SignedData = Hash; + + impl_shred_common!(); + + // Also equal to: + // ShredData::SIZE_OF_HEADERS + // + ShredData::capacity(proof_size, chained, resigned).unwrap() + // + if chained { SIZE_OF_MERKLE_ROOT } else { 0 } + // + usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY + // + if resigned { SIZE_OF_SIGNATURE } else { 0 } + const SIZE_OF_PAYLOAD: usize = + ShredCode::SIZE_OF_PAYLOAD - ShredCode::SIZE_OF_HEADERS + SIZE_OF_SIGNATURE; + const SIZE_OF_HEADERS: usize = SIZE_OF_DATA_SHRED_HEADERS; + + fn from_payload(mut payload: Vec) -> Result { + // see: https://github.com/solana-labs/solana/pull/10109 + if payload.len() < Self::SIZE_OF_PAYLOAD { + return Err(Error::InvalidPayloadSize(payload.len())); + } + payload.truncate(Self::SIZE_OF_PAYLOAD); + let mut cursor = Cursor::new(&payload[..]); + let common_header: ShredCommonHeader = deserialize_from_with_limit(&mut cursor)?; + if !matches!(common_header.shred_variant, ShredVariant::MerkleData { .. }) { + return Err(Error::InvalidShredVariant); + } + let data_header = deserialize_from_with_limit(&mut cursor)?; + let shred = Self { common_header, data_header, payload }; + shred.sanitize()?; + Ok(shred) + } + + fn erasure_shard_index(&self) -> Result { + shred_data::erasure_shard_index(self).ok_or_else(|| Error::InvalidErasureShardIndex) + } + + fn erasure_shard(self) -> Result, Error> { + if self.payload.len() != Self::SIZE_OF_PAYLOAD { + return Err(Error::InvalidPayloadSize(self.payload.len())); + } + let ShredVariant::MerkleData { proof_size, chained, resigned } = + self.common_header.shred_variant + else { + return Err(Error::InvalidShredVariant); + }; + let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size, chained, resigned)?; + let mut shard = self.payload; + shard.truncate(offset); + shard.drain(..SIZE_OF_SIGNATURE); + Ok(shard) + } + + fn erasure_shard_as_slice(&self) -> Result<&[u8], Error> { + if self.payload.len() != Self::SIZE_OF_PAYLOAD { + return Err(Error::InvalidPayloadSize(self.payload.len())); + } + let ShredVariant::MerkleData { proof_size, chained, resigned } = + self.common_header.shred_variant + else { + return Err(Error::InvalidShredVariant); + }; + let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size, chained, resigned)?; + self.payload + .get(SIZE_OF_SIGNATURE..offset) + .ok_or(Error::InvalidPayloadSize(self.payload.len())) + } + + fn sanitize(&self) -> Result<(), Error> { + let shred_variant = self.common_header.shred_variant; + if !matches!(shred_variant, ShredVariant::MerkleData { .. }) { + return Err(Error::InvalidShredVariant); + } + let _ = self.merkle_proof()?; + shred_data::sanitize(self) + } + + fn signed_data(&'a self) -> Result { + self.merkle_root() + } +} + +impl<'a> ShredTrait<'a> for ShredCode { + type SignedData = Hash; + + impl_shred_common!(); + const SIZE_OF_PAYLOAD: usize = shred_code::ShredCode::SIZE_OF_PAYLOAD; + const SIZE_OF_HEADERS: usize = SIZE_OF_CODING_SHRED_HEADERS; + + fn from_payload(mut payload: Vec) -> Result { + let mut cursor = Cursor::new(&payload[..]); + let common_header: ShredCommonHeader = deserialize_from_with_limit(&mut cursor)?; + if !matches!(common_header.shred_variant, ShredVariant::MerkleCode { .. 
}) { + return Err(Error::InvalidShredVariant); + } + let coding_header = deserialize_from_with_limit(&mut cursor)?; + // see: https://github.com/solana-labs/solana/pull/10109 + if payload.len() < Self::SIZE_OF_PAYLOAD { + return Err(Error::InvalidPayloadSize(payload.len())); + } + payload.truncate(Self::SIZE_OF_PAYLOAD); + let shred = Self { common_header, coding_header, payload }; + shred.sanitize()?; + Ok(shred) + } + + fn erasure_shard_index(&self) -> Result { + shred_code::erasure_shard_index(self).ok_or_else(|| Error::InvalidErasureShardIndex) + } + + fn erasure_shard(self) -> Result, Error> { + if self.payload.len() != Self::SIZE_OF_PAYLOAD { + return Err(Error::InvalidPayloadSize(self.payload.len())); + } + let ShredVariant::MerkleCode { proof_size, chained, resigned } = + self.common_header.shred_variant + else { + return Err(Error::InvalidShredVariant); + }; + let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size, chained, resigned)?; + let mut shard = self.payload; + shard.truncate(offset); + shard.drain(..Self::SIZE_OF_HEADERS); + Ok(shard) + } + + fn erasure_shard_as_slice(&self) -> Result<&[u8], Error> { + if self.payload.len() != Self::SIZE_OF_PAYLOAD { + return Err(Error::InvalidPayloadSize(self.payload.len())); + } + let ShredVariant::MerkleCode { proof_size, chained, resigned } = + self.common_header.shred_variant + else { + return Err(Error::InvalidShredVariant); + }; + let offset = Self::SIZE_OF_HEADERS + Self::capacity(proof_size, chained, resigned)?; + self.payload + .get(Self::SIZE_OF_HEADERS..offset) + .ok_or(Error::InvalidPayloadSize(self.payload.len())) + } + + fn sanitize(&self) -> Result<(), Error> { + let shred_variant = self.common_header.shred_variant; + if !matches!(shred_variant, ShredVariant::MerkleCode { .. }) { + return Err(Error::InvalidShredVariant); + } + let _ = self.merkle_proof()?; + shred_code::sanitize(self) + } + + fn signed_data(&'a self) -> Result { + self.merkle_root() + } +} + +impl ShredDataTrait for ShredData { + #[inline] + fn data_header(&self) -> &DataShredHeader { + &self.data_header + } + + fn data(&self) -> Result<&[u8], Error> { + let ShredVariant::MerkleData { proof_size, chained, resigned } = + self.common_header.shred_variant + else { + return Err(Error::InvalidShredVariant); + }; + let data_buffer_size = Self::capacity(proof_size, chained, resigned)?; + let size = usize::from(self.data_header.size); + if size > self.payload.len() || + size < Self::SIZE_OF_HEADERS || + size > Self::SIZE_OF_HEADERS + data_buffer_size + { + return Err(Error::InvalidDataSize(self.data_header.size)) + } + Ok(&self.payload[Self::SIZE_OF_HEADERS..size]) + } +} + +impl ShredCodeTrait for ShredCode { + #[inline] + fn coding_header(&self) -> &CodingShredHeader { + &self.coding_header + } +} + +// Obtains parent's hash by joining two sibiling nodes in merkle tree. +fn join_nodes, T: AsRef<[u8]>>(node: S, other: T) -> Hash { + let node = &node.as_ref()[..SIZE_OF_MERKLE_PROOF_ENTRY]; + let other = &other.as_ref()[..SIZE_OF_MERKLE_PROOF_ENTRY]; + hashv(&[MERKLE_HASH_PREFIX_NODE, node, other]) +} + +// Recovers root of the merkle tree from a leaf node +// at the given index and the respective proof. 
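+// (At each step the fold hashes the running node with its sibling, picking
+// left/right order from the low bit of `index`, then halves `index`; the
+// proof is only accepted if the walk ends with `index == 0`, i.e. at the
+// root.)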
+fn get_merkle_root<'a, I>(index: usize, node: Hash, proof: I) -> Result +where + I: IntoIterator, +{ + let (index, root) = proof.into_iter().fold((index, node), |(index, node), other| { + let parent = if index % 2 == 0 { join_nodes(node, other) } else { join_nodes(other, node) }; + (index >> 1, parent) + }); + (index == 0).then_some(root).ok_or(Error::InvalidMerkleProof) +} + +fn get_merkle_proof( + shred: &[u8], + proof_offset: usize, // Where the merkle proof starts. + proof_size: u8, // Number of proof entries. +) -> Result, Error> { + let proof_size = usize::from(proof_size) * SIZE_OF_MERKLE_PROOF_ENTRY; + Ok(shred + .get(proof_offset..proof_offset + proof_size) + .ok_or(Error::InvalidPayloadSize(shred.len()))? + .chunks(SIZE_OF_MERKLE_PROOF_ENTRY) + .map(<&MerkleProofEntry>::try_from) + .map(Result::unwrap)) +} + +fn get_merkle_node(shred: &[u8], offsets: Range) -> Result { + let node = shred.get(offsets).ok_or(Error::InvalidPayloadSize(shred.len()))?; + Ok(hashv(&[MERKLE_HASH_PREFIX_LEAF, node])) +} + +fn make_merkle_tree(mut nodes: Vec) -> Vec { + let mut size = nodes.len(); + while size > 1 { + let offset = nodes.len() - size; + for index in (offset..offset + size).step_by(2) { + let node = &nodes[index]; + let other = &nodes[(index + 1).min(offset + size - 1)]; + let parent = join_nodes(node, other); + nodes.push(parent); + } + size = nodes.len() - offset - size; + } + nodes +} + +fn make_merkle_proof( + mut index: usize, // leaf index ~ shred's erasure shard index. + mut size: usize, // number of leaves ~ erasure batch size. + tree: &[Hash], +) -> Option> { + if index >= size { + return None; + } + let mut offset = 0; + let mut proof = Vec::<&MerkleProofEntry>::new(); + while size > 1 { + let node = tree.get(offset + (index ^ 1).min(size - 1))?; + let entry = &node.as_ref()[..SIZE_OF_MERKLE_PROOF_ENTRY]; + proof.push(<&MerkleProofEntry>::try_from(entry).unwrap()); + offset += size; + size = (size + 1) >> 1; + index >>= 1; + } + (offset + 1 == tree.len()).then_some(proof) +} + +// Maps number of (code + data) shreds to merkle_proof.len(). +fn get_proof_size(num_shreds: usize) -> u8 { + let bits = usize::BITS - num_shreds.leading_zeros(); + let proof_size = if num_shreds.is_power_of_two() { bits.checked_sub(1).unwrap() } else { bits }; + u8::try_from(proof_size).unwrap() +} diff --git a/light-clients/icsxx-cf-solana/src/solana/shred/mod.rs b/light-clients/icsxx-cf-solana/src/solana/shred/mod.rs new file mode 100644 index 000000000..1685ea95c --- /dev/null +++ b/light-clients/icsxx-cf-solana/src/solana/shred/mod.rs @@ -0,0 +1,740 @@ +//! File source: solana/ledger/src/shred.rs +//! +//! The `shred` module defines data structures and methods to pull MTU sized data frames from the +//! network. There are two types of shreds: data and coding. Data shreds contain entry information +//! while coding shreds provide redundancy to protect against dropped network packets (erasures). +//! +//! +---------------------------------------------------------------------------------------------+ +//! | Data Shred | +//! +---------------------------------------------------------------------------------------------+ +//! | common | data | payload | +//! | header | header | | +//! |+---+---+--- |+---+---+---|+----------------------------------------------------------+----+| +//! || s | s | . || p | f | s || data (ie ledger entries) | r || +//! || i | h | . || a | l | i || | e || +//! || g | r | . || r | a | z || See notes immediately after shred diagrams for an | s || +//! 
|| n | e | || e | g | e || explanation of the "restricted" section in this payload | t || +//! || a | d | || n | s | || | r || +//! || t | | || t | | || | i || +//! || u | t | || | | || | c || +//! || r | y | || o | | || | t || +//! || e | p | || f | | || | e || +//! || | e | || f | | || | d || +//! |+---+---+--- |+---+---+---+|----------------------------------------------------------+----+| +//! +---------------------------------------------------------------------------------------------+ +//! +//! +---------------------------------------------------------------------------------------------+ +//! | Coding Shred | +//! +---------------------------------------------------------------------------------------------+ +//! | common | coding | payload | +//! | header | header | | +//! |+---+---+--- |+---+---+---+----------------------------------------------------------------+| +//! || s | s | . || n | n | p || data (encoded data shred data) || +//! || i | h | . || u | u | o || || +//! || g | r | . || m | m | s || || +//! || n | e | || | | i || || +//! || a | d | || d | c | t || || +//! || t | | || | | i || || +//! || u | t | || s | s | o || || +//! || r | y | || h | h | n || || +//! || e | p | || r | r | || || +//! || | e | || e | e | || || +//! || | | || d | d | || || +//! |+---+---+--- |+---+---+---+|+--------------------------------------------------------------+| +//! +---------------------------------------------------------------------------------------------+ +//! +//! Notes: +//! a) Coding shreds encode entire data shreds: both of the headers AND the payload. +//! b) Coding shreds require their own headers for identification and etc. +//! c) The erasure algorithm requires data shred and coding shred bytestreams to be equal in length. +//! +//! So, given a) - c), we must restrict data shred's payload length such that the entire coding +//! payload can fit into one coding shred / packet. +//! +//! TODO: check if `Legacy` variants of shreds are still being used in Solana. If not, get rif of +//! them, not breaking the encoding + +pub use self::shred_data::ShredData; +use self::{shred_code::ShredCode, traits::Shred as _}; +use crate::solana::Error; +use alloc::vec::Vec; +use bitflags::bitflags; +use core::fmt::Debug; +use num_enum::{IntoPrimitive, TryFromPrimitive}; +use serde::{Deserialize, Serialize}; +use solana_sdk::{ + clock::Slot, + hash::Hash, + pubkey::Pubkey, + signature::{Keypair, Signature, Signer, SIGNATURE_BYTES}, +}; +use static_assertions::const_assert_eq; + +mod common; +pub mod legacy; +pub mod merkle; +pub mod shred_code; +pub mod shred_data; +pub(crate) mod traits; + +pub type Nonce = u32; +const_assert_eq!(SIZE_OF_NONCE, 4); +pub const SIZE_OF_NONCE: usize = std::mem::size_of::(); + +/// The following constants are computed by hand, and hardcoded. +/// `test_shred_constants` ensures that the values are correct. +/// Constants are used over lazy_static for performance reasons. 
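+///
+/// (As a sanity check: the common header is a 64-byte signature + 1-byte
+/// variant + 8-byte slot + 4-byte index + 2-byte version + 4-byte
+/// fec_set_index = 83 bytes; the data header adds 2 + 1 + 2 = 5 bytes for
+/// parent_offset, flags and size (88 total), and the coding header adds
+/// 2 + 2 + 2 = 6 bytes for num_data_shreds, num_coding_shreds and position
+/// (89 total).)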
+const SIZE_OF_COMMON_SHRED_HEADER: usize = 83; +const SIZE_OF_DATA_SHRED_HEADERS: usize = 88; +const SIZE_OF_CODING_SHRED_HEADERS: usize = 89; +const SIZE_OF_SIGNATURE: usize = SIGNATURE_BYTES; +const SIZE_OF_SHRED_VARIANT: usize = 1; +const SIZE_OF_SHRED_SLOT: usize = 8; + +const OFFSET_OF_SHRED_VARIANT: usize = SIZE_OF_SIGNATURE; +const OFFSET_OF_SHRED_SLOT: usize = SIZE_OF_SIGNATURE + SIZE_OF_SHRED_VARIANT; +const OFFSET_OF_SHRED_INDEX: usize = OFFSET_OF_SHRED_SLOT + SIZE_OF_SHRED_SLOT; + +// Shreds are uniformly split into erasure batches with a "target" number of +// data shreds per each batch as below. The actual number of data shreds in +// each erasure batch depends on the number of shreds obtained from serializing +// a &[Entry]. +pub const DATA_SHREDS_PER_FEC_BLOCK: usize = 32; + +// LAST_SHRED_IN_SLOT also implies DATA_COMPLETE_SHRED. +// So it cannot be LAST_SHRED_IN_SLOT if not also DATA_COMPLETE_SHRED. +bitflags! { + #[derive(Clone, Copy, Debug, Default, Eq, PartialEq, Serialize, Deserialize)] + pub struct ShredFlags:u8 { + const SHRED_TICK_REFERENCE_MASK = 0b0011_1111; + const DATA_COMPLETE_SHRED = 0b0100_0000; + const LAST_SHRED_IN_SLOT = 0b1100_0000; + } +} + +#[repr(u8)] +#[derive( + Clone, Copy, Debug, Eq, Hash, PartialEq, Deserialize, IntoPrimitive, Serialize, TryFromPrimitive, +)] +#[serde(into = "u8", try_from = "u8")] +pub enum ShredType { + Data = 0b1010_0101, + Code = 0b0101_1010, +} + +#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)] +#[serde(into = "u8", try_from = "u8")] +pub enum ShredVariant { + LegacyCode, // 0b0101_1010 + LegacyData, // 0b1010_0101 + // proof_size is the number of Merkle proof entries, and is encoded in the + // lowest 4 bits of the binary representation. The first 4 bits identify + // the shred variant: + // 0b0100_???? MerkleCode + // 0b0110_???? MerkleCode chained + // 0b0111_???? MerkleCode chained resigned + // 0b1000_???? MerkleData + // 0b1001_???? MerkleData chained + // 0b1011_???? MerkleData chained resigned + MerkleCode { proof_size: u8, chained: bool, resigned: bool }, // 0b01??_???? + MerkleData { proof_size: u8, chained: bool, resigned: bool }, // 0b10??_???? +} + +/// A common header that is present in data and code shred headers +#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)] +pub struct ShredCommonHeader { + pub(crate) signature: Signature, + pub(crate) shred_variant: ShredVariant, + pub(crate) slot: Slot, + pub(crate) index: u32, + pub(crate) version: u16, + pub(crate) fec_set_index: u32, +} + +/// The data shred header has parent offset and flags +#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)] +pub struct DataShredHeader { + pub(crate) parent_offset: u16, + pub(crate) flags: ShredFlags, + pub(crate) size: u16, // common shred header + data shred header + data +} + +/// The coding shred header has FEC information +#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)] +pub struct CodingShredHeader { + pub(crate) num_data_shreds: u16, + pub(crate) num_coding_shreds: u16, + pub(crate) position: u16, // [0..num_coding_shreds) +} + +#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] +pub enum Shred { + ShredCode(ShredCode), + ShredData(ShredData), +} + +#[derive(Debug, PartialEq, Eq)] +pub(crate) enum SignedData<'a> { + Chunk(&'a [u8]), // Chunk of payload past signature. 
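+    // Merkle shreds sign the Merkle root instead; it is recovered from the
+    // proof embedded in the payload (see `layout::get_signed_data`).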
+
+#[repr(u8)]
+#[derive(
+    Clone, Copy, Debug, Eq, Hash, PartialEq, Deserialize, IntoPrimitive, Serialize, TryFromPrimitive,
+)]
+#[serde(into = "u8", try_from = "u8")]
+pub enum ShredType {
+    Data = 0b1010_0101,
+    Code = 0b0101_1010,
+}
+
+#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)]
+#[serde(into = "u8", try_from = "u8")]
+pub enum ShredVariant {
+    LegacyCode, // 0b0101_1010
+    LegacyData, // 0b1010_0101
+    // proof_size is the number of Merkle proof entries and is encoded in the
+    // lowest 4 bits of the binary representation. The first 4 bits identify
+    // the shred variant:
+    //   0b0100_????  MerkleCode
+    //   0b0110_????  MerkleCode chained
+    //   0b0111_????  MerkleCode chained resigned
+    //   0b1000_????  MerkleData
+    //   0b1001_????  MerkleData chained
+    //   0b1011_????  MerkleData chained resigned
+    MerkleCode { proof_size: u8, chained: bool, resigned: bool }, // 0b01??_????
+    MerkleData { proof_size: u8, chained: bool, resigned: bool }, // 0b10??_????
+}
+
+/// A common header that is present in data and code shred headers
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)]
+pub struct ShredCommonHeader {
+    pub(crate) signature: Signature,
+    pub(crate) shred_variant: ShredVariant,
+    pub(crate) slot: Slot,
+    pub(crate) index: u32,
+    pub(crate) version: u16,
+    pub(crate) fec_set_index: u32,
+}
+
+/// The data shred header has parent offset and flags
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)]
+pub struct DataShredHeader {
+    pub(crate) parent_offset: u16,
+    pub(crate) flags: ShredFlags,
+    pub(crate) size: u16, // common shred header + data shred header + data
+}
+
+/// The coding shred header has FEC information
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize)]
+pub struct CodingShredHeader {
+    pub(crate) num_data_shreds: u16,
+    pub(crate) num_coding_shreds: u16,
+    pub(crate) position: u16, // [0..num_coding_shreds)
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
+pub enum Shred {
+    ShredCode(ShredCode),
+    ShredData(ShredData),
+}
+
+#[derive(Debug, PartialEq, Eq)]
+pub(crate) enum SignedData<'a> {
+    Chunk(&'a [u8]), // Chunk of payload past signature.
+    MerkleRoot(Hash),
+}
+
+impl<'a> AsRef<[u8]> for SignedData<'a> {
+    fn as_ref(&self) -> &[u8] {
+        match self {
+            Self::Chunk(chunk) => chunk,
+            Self::MerkleRoot(root) => root.as_ref(),
+        }
+    }
+}
+
+#[macro_export]
+macro_rules! dispatch {
+    ($vis:vis fn $name:ident(&self $(, $arg:ident : $ty:ty)?) $(-> $out:ty)?) => {
+        #[inline]
+        $vis fn $name(&self $(, $arg:$ty)?) $(-> $out)? {
+            match self {
+                Self::ShredCode(shred) => shred.$name($($arg, )?),
+                Self::ShredData(shred) => shred.$name($($arg, )?),
+            }
+        }
+    };
+    ($vis:vis fn $name:ident(self $(, $arg:ident : $ty:ty)?) $(-> $out:ty)?) => {
+        #[inline]
+        $vis fn $name(self $(, $arg:$ty)?) $(-> $out)? {
+            match self {
+                Self::ShredCode(shred) => shred.$name($($arg, )?),
+                Self::ShredData(shred) => shred.$name($($arg, )?),
+            }
+        }
+    };
+    ($vis:vis fn $name:ident(&mut self $(, $arg:ident : $ty:ty)?) $(-> $out:ty)?) => {
+        #[inline]
+        $vis fn $name(&mut self $(, $arg:$ty)?) $(-> $out)? {
+            match self {
+                Self::ShredCode(shred) => shred.$name($($arg, )?),
+                Self::ShredData(shred) => shred.$name($($arg, )?),
+            }
+        }
+    }
+}
+
+pub use dispatch;
+
+impl Shred {
+    dispatch!(fn common_header(&self) -> &ShredCommonHeader);
+    dispatch!(fn set_signature(&mut self, signature: Signature));
+    dispatch!(fn signed_data(&self) -> Result<SignedData, Error>);
+
+    // Returns the portion of the shred's payload which is erasure coded.
+    dispatch!(pub(crate) fn erasure_shard(self) -> Result<Vec<u8>, Error>);
+    // Like Shred::erasure_shard but returning a slice.
+    dispatch!(pub(crate) fn erasure_shard_as_slice(&self) -> Result<&[u8], Error>);
+    // Returns the shard index within the erasure coding set.
+    dispatch!(pub(crate) fn erasure_shard_index(&self) -> Result<usize, Error>);
+
+    dispatch!(pub fn into_payload(self) -> Vec<u8>);
+    dispatch!(pub fn merkle_root(&self) -> Result<Hash, Error>);
+    dispatch!(pub fn payload(&self) -> &Vec<u8>);
+    dispatch!(pub fn sanitize(&self) -> Result<(), Error>);
+
+    // Only for tests.
+    dispatch!(pub fn set_index(&mut self, index: u32));
+    dispatch!(pub fn set_slot(&mut self, slot: Slot));
+
+    // TODO: Should this sanitize output?
+    pub fn new_from_data(
+        slot: Slot,
+        index: u32,
+        parent_offset: u16,
+        data: &[u8],
+        flags: ShredFlags,
+        reference_tick: u8,
+        version: u16,
+        fec_set_index: u32,
+    ) -> Self {
+        Self::from(ShredData::new_from_data(
+            slot,
+            index,
+            parent_offset,
+            data,
+            flags,
+            reference_tick,
+            version,
+            fec_set_index,
+        ))
+    }
+
+    pub fn new_from_serialized_shred(shred: Vec<u8>) -> Result<Self, Error> {
+        Ok(match layout::get_shred_variant(&shred)? {
+            ShredVariant::LegacyCode => {
+                let shred = legacy::ShredCode::from_payload(shred)?;
+                Self::from(ShredCode::from(shred))
+            },
+            ShredVariant::LegacyData => {
+                let shred = legacy::ShredData::from_payload(shred)?;
+                Self::from(ShredData::from(shred))
+            },
+            ShredVariant::MerkleCode { .. } => {
+                let shred = merkle::ShredCode::from_payload(shred)?;
+                Self::from(ShredCode::from(shred))
+            },
+            ShredVariant::MerkleData { .. } => {
+                let shred = merkle::ShredData::from_payload(shred)?;
+                Self::from(ShredData::from(shred))
+            },
+        })
+    }
+
+    pub fn slot(&self) -> Slot {
+        self.common_header().slot
+    }
+
+    pub fn parent(&self) -> Result<Slot, Error> {
+        match self {
+            Self::ShredCode(_) => Err(Error::InvalidShredType),
+            Self::ShredData(shred) => shred.parent(),
+        }
+    }
+
+    pub fn index(&self) -> u32 {
+        self.common_header().index
+    }
+
+    pub(crate) fn data(&self) -> Result<&[u8], Error> {
+        match self {
+            Self::ShredCode(_) => Err(Error::InvalidShredType),
+            Self::ShredData(shred) => shred.data(),
+        }
+    }
+
+    // Possibly trimmed payload.
+    // Should only be used when storing shreds to blockstore.
+    pub(crate) fn bytes_to_store(&self) -> &[u8] {
+        match self {
+            Self::ShredCode(shred) => shred.payload(),
+            Self::ShredData(shred) => shred.bytes_to_store(),
+        }
+    }
+
+    pub fn fec_set_index(&self) -> u32 {
+        self.common_header().fec_set_index
+    }
+
+    pub(crate) fn first_coding_index(&self) -> Option<u32> {
+        match self {
+            Self::ShredCode(shred) => shred.first_coding_index(),
+            Self::ShredData(_) => None,
+        }
+    }
+
+    pub fn version(&self) -> u16 {
+        self.common_header().version
+    }
+
+    pub fn signature(&self) -> &Signature {
+        &self.common_header().signature
+    }
+
+    pub fn sign(&mut self, keypair: &Keypair) {
+        let data = self.signed_data().unwrap();
+        let signature = keypair.sign_message(data.as_ref());
+        self.set_signature(signature);
+    }
+
+    #[inline]
+    pub fn shred_type(&self) -> ShredType {
+        ShredType::from(self.common_header().shred_variant)
+    }
+
+    pub fn is_data(&self) -> bool {
+        self.shred_type() == ShredType::Data
+    }
+
+    pub fn is_code(&self) -> bool {
+        self.shred_type() == ShredType::Code
+    }
+
+    pub fn last_in_slot(&self) -> bool {
+        match self {
+            Self::ShredCode(_) => false,
+            Self::ShredData(shred) => shred.last_in_slot(),
+        }
+    }
+
+    /// This is not a safe function. It only changes the meta information.
+    /// Use this only for test code which doesn't care about the actual shred
+    /// contents.
+    pub fn set_last_in_slot(&mut self) {
+        match self {
+            Self::ShredCode(_) => (),
+            Self::ShredData(shred) => shred.set_last_in_slot(),
+        }
+    }
+
+    pub fn data_complete(&self) -> bool {
+        match self {
+            Self::ShredCode(_) => false,
+            Self::ShredData(shred) => shred.data_complete(),
+        }
+    }
+
+    pub(crate) fn reference_tick(&self) -> u8 {
+        match self {
+            Self::ShredCode(_) => ShredFlags::SHRED_TICK_REFERENCE_MASK.bits(),
+            Self::ShredData(shred) => shred.reference_tick(),
+        }
+    }
+
+    #[must_use]
+    pub fn verify(&self, pubkey: &Pubkey) -> bool {
+        match self.signed_data() {
+            Ok(data) => self.signature().verify(pubkey.as_ref(), data.as_ref()),
+            Err(_) => false,
+        }
+    }
+
+    // Verifies the signature and returns the merkle root.
+    #[must_use]
+    pub fn verify_with_root(&self, leader: &Pubkey) -> Result<Hash, Error> {
+        if !self.verify(leader) {
+            return Err(Error::ShredSignatureVerificationFailed);
+        }
+        self.merkle_root()
+    }
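+
+    // Editor's note: a typical verification flow (illustration, not vendored
+    // code):
+    //
+    //     let shred = Shred::new_from_serialized_shred(bytes)?;
+    //     assert!(shred.verify(&leader_pubkey));
+    //
+    // For merkle variants the signed data is the merkle root, so one leader
+    // signature covers every shred in the erasure batch.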
+
+    // Returns true if the erasure coding of the two shreds mismatch.
+    pub(crate) fn erasure_mismatch(&self, other: &Self) -> Result<bool, Error> {
+        match (self, other) {
+            (Self::ShredCode(shred), Self::ShredCode(other)) => Ok(shred.erasure_mismatch(other)),
+            _ => Err(Error::InvalidShredType),
+        }
+    }
+
+    pub fn num_data_shreds(&self) -> Result<u16, Error> {
+        match self {
+            Self::ShredCode(shred) => Ok(shred.num_data_shreds()),
+            Self::ShredData(_) => Err(Error::InvalidShredType),
+        }
+    }
+
+    pub fn num_coding_shreds(&self) -> Result<u16, Error> {
+        match self {
+            Self::ShredCode(shred) => Ok(shred.num_coding_shreds()),
+            Self::ShredData(_) => Err(Error::InvalidShredType),
+        }
+    }
+}
+
+// Helper methods to extract pieces of the shred from the payload
+// without deserializing the entire payload.
+pub mod layout {
+    use super::*;
+    use std::ops::Range;
+
+    pub(crate) fn get_signature(shred: &[u8]) -> Option<Signature> {
+        shred.get(..SIZE_OF_SIGNATURE).map(Signature::try_from)?.ok()
+    }
+
+    pub(crate) const fn get_signature_range() -> Range<usize> {
+        0..SIZE_OF_SIGNATURE
+    }
+
+    pub(super) fn get_shred_variant(shred: &[u8]) -> Result<ShredVariant, Error> {
+        let Some(&shred_variant) = shred.get(OFFSET_OF_SHRED_VARIANT) else {
+            return Err(Error::InvalidPayloadSize(shred.len()));
+        };
+        ShredVariant::try_from(shred_variant).map_err(|_| Error::InvalidShredVariant)
+    }
+
+    #[inline]
+    pub(super) fn get_shred_type(shred: &[u8]) -> Result<ShredType, Error> {
+        let shred_variant = get_shred_variant(shred)?;
+        Ok(ShredType::from(shred_variant))
+    }
+
+    #[inline]
+    pub fn get_slot(shred: &[u8]) -> Option<Slot> {
+        <[u8; 8]>::try_from(shred.get(OFFSET_OF_SHRED_SLOT..)?.get(..8)?)
+            .map(Slot::from_le_bytes)
+            .ok()
+    }
+
+    #[inline]
+    pub(super) fn get_index(shred: &[u8]) -> Option<u32> {
+        <[u8; 4]>::try_from(shred.get(OFFSET_OF_SHRED_INDEX..)?.get(..4)?)
+            .map(u32::from_le_bytes)
+            .ok()
+    }
+
+    pub fn get_version(shred: &[u8]) -> Option<u16> {
+        // Version sits at bytes 77..79 of the common header
+        // (64 signature + 1 variant + 8 slot + 4 index).
+        <[u8; 2]>::try_from(shred.get(77..79)?).map(u16::from_le_bytes).ok()
+    }
+
+    // The caller should verify first that the shred is data and not code!
+    pub(super) fn get_parent_offset(shred: &[u8]) -> Option<u16> {
+        debug_assert_eq!(get_shred_type(shred).unwrap(), ShredType::Data);
+        // parent_offset is the first field of the data shred header, right
+        // after the 83-byte common header.
+        <[u8; 2]>::try_from(shred.get(83..85)?).map(u16::from_le_bytes).ok()
+    }
+
+    pub(crate) fn get_signed_data(shred: &[u8]) -> Option<SignedData> {
+        let data = match get_shred_variant(shred).ok()? {
+            ShredVariant::LegacyCode | ShredVariant::LegacyData => {
+                let chunk = shred.get(self::legacy::SIGNED_MESSAGE_OFFSETS)?;
+                SignedData::Chunk(chunk)
+            },
+            ShredVariant::MerkleCode { proof_size, chained, resigned } => {
+                let merkle_root =
+                    self::merkle::ShredCode::get_merkle_root(shred, proof_size, chained, resigned)?;
+                SignedData::MerkleRoot(merkle_root)
+            },
+            ShredVariant::MerkleData { proof_size, chained, resigned } => {
+                let merkle_root =
+                    self::merkle::ShredData::get_merkle_root(shred, proof_size, chained, resigned)?;
+                SignedData::MerkleRoot(merkle_root)
+            },
+        };
+        Some(data)
+    }
+
+    // Returns the offsets within the shred payload which are signed.
+    pub(crate) fn get_signed_data_offsets(shred: &[u8]) -> Option<Range<usize>> {
+        match get_shred_variant(shred).ok()? {
+            ShredVariant::LegacyCode | ShredVariant::LegacyData => {
+                let offsets = self::legacy::SIGNED_MESSAGE_OFFSETS;
+                (offsets.end <= shred.len()).then_some(offsets)
+            },
+            // Merkle shreds sign the merkle tree root, which can be recovered
+            // from the merkle proof embedded in the payload but is not itself
+            // stored in the payload.
+            ShredVariant::MerkleCode { .. } => None,
+            ShredVariant::MerkleData { .. } => None,
+        }
+    }
+
+    pub fn get_reference_tick(shred: &[u8]) -> Result<u8, Error> {
+        if get_shred_type(shred)? != ShredType::Data {
+            return Err(Error::InvalidShredType);
+        }
+        // Flags byte follows parent_offset in the data shred header.
+        let Some(flags) = shred.get(85) else {
+            return Err(Error::InvalidPayloadSize(shred.len()));
+        };
+        Ok(flags & ShredFlags::SHRED_TICK_REFERENCE_MASK.bits())
+    }
+
+    pub fn get_merkle_root(shred: &[u8]) -> Option<Hash> {
+        match get_shred_variant(shred).ok()? {
+            ShredVariant::LegacyCode | ShredVariant::LegacyData => None,
+            ShredVariant::MerkleCode { proof_size, chained, resigned } =>
+                merkle::ShredCode::get_merkle_root(shred, proof_size, chained, resigned),
+            ShredVariant::MerkleData { proof_size, chained, resigned } =>
+                merkle::ShredData::get_merkle_root(shred, proof_size, chained, resigned),
+        }
+    }
+}
+
+impl From<ShredCode> for Shred {
+    fn from(shred: ShredCode) -> Self {
+        Self::ShredCode(shred)
+    }
+}
+
+impl From<ShredData> for Shred {
+    fn from(shred: ShredData) -> Self {
+        Self::ShredData(shred)
+    }
+}
+
+impl From<merkle::Shred> for Shred {
+    fn from(shred: merkle::Shred) -> Self {
+        match shred {
+            merkle::Shred::ShredCode(shred) => Self::ShredCode(ShredCode::Merkle(shred)),
+            merkle::Shred::ShredData(shred) => Self::ShredData(ShredData::Merkle(shred)),
+        }
+    }
+}
+
+impl TryFrom<Shred> for merkle::Shred {
+    type Error = Error;
+
+    fn try_from(shred: Shred) -> Result<Self, Self::Error> {
+        match shred {
+            Shred::ShredCode(ShredCode::Legacy(_)) => Err(Error::InvalidShredVariant),
+            Shred::ShredCode(ShredCode::Merkle(shred)) => Ok(Self::ShredCode(shred)),
+            Shred::ShredData(ShredData::Legacy(_)) => Err(Error::InvalidShredVariant),
+            Shred::ShredData(ShredData::Merkle(shred)) => Ok(Self::ShredData(shred)),
+        }
+    }
+}
+
+impl From<ShredVariant> for ShredType {
+    #[inline]
+    fn from(shred_variant: ShredVariant) -> Self {
+        match shred_variant {
+            ShredVariant::LegacyCode => ShredType::Code,
+            ShredVariant::LegacyData => ShredType::Data,
+            ShredVariant::MerkleCode { .. } => ShredType::Code,
+            ShredVariant::MerkleData { .. } => ShredType::Data,
+        }
+    }
+}
+
+impl From<ShredVariant> for u8 {
+    fn from(shred_variant: ShredVariant) -> u8 {
+        match shred_variant {
+            ShredVariant::LegacyCode => u8::from(ShredType::Code),
+            ShredVariant::LegacyData => u8::from(ShredType::Data),
+            ShredVariant::MerkleCode { proof_size, chained: false, resigned: false } =>
+                proof_size | 0x40,
+            ShredVariant::MerkleCode { proof_size, chained: true, resigned: false } =>
+                proof_size | 0x60,
+            ShredVariant::MerkleCode { proof_size, chained: true, resigned: true } =>
+                proof_size | 0x70,
+            ShredVariant::MerkleData { proof_size, chained: false, resigned: false } =>
+                proof_size | 0x80,
+            ShredVariant::MerkleData { proof_size, chained: true, resigned: false } =>
+                proof_size | 0x90,
+            ShredVariant::MerkleData { proof_size, chained: true, resigned: true } =>
+                proof_size | 0xb0,
+            ShredVariant::MerkleCode { proof_size: _, chained: false, resigned: true } |
+            ShredVariant::MerkleData { proof_size: _, chained: false, resigned: true } =>
+                panic!("Invalid shred variant: {shred_variant:?}"),
+        }
+    }
+}
+
+impl TryFrom<u8> for ShredVariant {
+    type Error = Error;
+
+    fn try_from(shred_variant: u8) -> Result<Self, Self::Error> {
+        if shred_variant == u8::from(ShredType::Code) {
+            Ok(ShredVariant::LegacyCode)
+        } else if shred_variant == u8::from(ShredType::Data) {
+            Ok(ShredVariant::LegacyData)
+        } else {
+            let proof_size = shred_variant & 0x0F;
+            match shred_variant & 0xF0 {
+                0x40 =>
+                    Ok(ShredVariant::MerkleCode { proof_size, chained: false, resigned: false }),
+                0x60 => Ok(ShredVariant::MerkleCode { proof_size, chained: true, resigned: false }),
+                0x70 => Ok(ShredVariant::MerkleCode { proof_size, chained: true, resigned: true }),
+                0x80 =>
+                    Ok(ShredVariant::MerkleData { proof_size, chained: false, resigned: false }),
+                0x90 => Ok(ShredVariant::MerkleData { proof_size, chained: true, resigned: false }),
+                0xb0 => Ok(ShredVariant::MerkleData { proof_size, chained: true, resigned: true }),
+                _ => Err(Error::InvalidShredVariant),
+            }
+        }
+    }
+}
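+
+// Editor's note: a small round-trip check (not part of the vendored source)
+// for the variant byte encoding documented on `ShredVariant`.
+#[cfg(test)]
+mod shred_variant_encoding_example {
+    use super::ShredVariant;
+
+    #[test]
+    fn merkle_code_chained_round_trip() {
+        let variant = ShredVariant::MerkleCode { proof_size: 5, chained: true, resigned: false };
+        // 0b0110_???? marks chained MerkleCode; the low 4 bits carry proof_size.
+        assert_eq!(u8::from(variant), 0x65);
+        assert_eq!(ShredVariant::try_from(0x65).unwrap(), variant);
+    }
+}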
+
+// TODO: use `should_discard_shred` in header verification?
+/*
+// Accepts shreds in the slot range [root + 1, max_slot].
+#[must_use]
+pub fn should_discard_shred(
+    packet: &Packet,
+    root: Slot,
+    max_slot: Slot,
+    shred_version: u16,
+    should_drop_legacy_shreds: impl Fn(Slot) -> bool,
+    enable_chained_merkle_shreds: impl Fn(Slot) -> bool,
+    stats: &mut ShredFetchStats,
+) -> bool {
+    debug_assert!(root < max_slot);
+    let shred = match layout::get_shred(packet) {
+        None => {
+            stats.index_overrun += 1;
+            return true;
+        },
+        Some(shred) => shred,
+    };
+    match layout::get_version(shred) {
+        None => {
+            stats.index_overrun += 1;
+            return true;
+        },
+        Some(version) =>
+            if version != shred_version {
+                stats.shred_version_mismatch += 1;
+                return true;
+            },
+    }
+    let Ok(shred_variant) = layout::get_shred_variant(shred) else {
+        stats.bad_shred_type += 1;
+        return true;
+    };
+    let slot = match layout::get_slot(shred) {
+        Some(slot) => {
+            if slot > max_slot {
+                stats.slot_out_of_range += 1;
+                return true;
+            }
+            slot
+        },
+        None => {
+            stats.slot_bad_deserialize += 1;
+            return true;
+        },
+    };
+    let Some(index) = layout::get_index(shred) else {
+        stats.index_bad_deserialize += 1;
+        return true;
+    };
+    match ShredType::from(shred_variant) {
+        ShredType::Code => {
+            if index >= shred_code::MAX_CODE_SHREDS_PER_SLOT as u32 {
+                stats.index_out_of_bounds += 1;
+                return true;
+            }
+            if slot <= root {
+                stats.slot_out_of_range += 1;
+                return true;
+            }
+        },
+        ShredType::Data => {
+            if index >= MAX_DATA_SHREDS_PER_SLOT as u32 {
+                stats.index_out_of_bounds += 1;
+                return true;
+            }
+            let Some(parent_offset) = layout::get_parent_offset(shred) else {
+                stats.bad_parent_offset += 1;
+                return true;
+            };
+            let Some(parent) = slot.checked_sub(Slot::from(parent_offset)) else {
+                stats.bad_parent_offset += 1;
+                return true;
+            };
+            if !blockstore::verify_shred_slots(slot, parent, root) {
+                stats.slot_out_of_range += 1;
+                return true;
+            }
+        },
+    }
+    match shred_variant {
+        ShredVariant::LegacyCode | ShredVariant::LegacyData =>
+            if should_drop_legacy_shreds(slot) {
+                return true;
+            },
+        ShredVariant::MerkleCode { chained: false, .. } => {
+            stats.num_shreds_merkle_code = stats.num_shreds_merkle_code.saturating_add(1);
+        },
+        ShredVariant::MerkleCode { chained: true, .. } => {
+            if !enable_chained_merkle_shreds(slot) {
+                return true;
+            }
+            stats.num_shreds_merkle_code_chained =
+                stats.num_shreds_merkle_code_chained.saturating_add(1);
+        },
+        ShredVariant::MerkleData { chained: false, .. } => {
+            stats.num_shreds_merkle_data = stats.num_shreds_merkle_data.saturating_add(1);
+        },
+        ShredVariant::MerkleData { chained: true, .. } => {
+            if !enable_chained_merkle_shreds(slot) {
+                return true;
+            }
+            stats.num_shreds_merkle_data_chained =
+                stats.num_shreds_merkle_data_chained.saturating_add(1);
+        },
+    }
+    false
+}
+ */
diff --git a/light-clients/icsxx-cf-solana/src/solana/shred/shred_code.rs b/light-clients/icsxx-cf-solana/src/solana/shred/shred_code.rs
new file mode 100644
index 000000000..a4e36dbf3
--- /dev/null
+++ b/light-clients/icsxx-cf-solana/src/solana/shred/shred_code.rs
@@ -0,0 +1,152 @@
+//! File source: solana/ledger/src/shred/shred_code.rs
+
+use crate::solana::{
+    blockstore::MAX_DATA_SHREDS_PER_SLOT,
+    shred::{
+        common::dispatch,
+        legacy, merkle,
+        traits::{Shred, ShredCode as ShredCodeTrait},
+        CodingShredHeader, ShredCommonHeader, ShredType, SignedData, DATA_SHREDS_PER_FEC_BLOCK,
+        SIZE_OF_NONCE,
+    },
+    Error,
+};
+use alloc::vec::Vec;
+use serde::{Deserialize, Serialize};
+use solana_sdk::{clock::Slot, hash::Hash, packet::PACKET_DATA_SIZE, signature::Signature};
+use static_assertions::const_assert_eq;
+
+const_assert_eq!(MAX_CODE_SHREDS_PER_SLOT, 32_768);
+pub const MAX_CODE_SHREDS_PER_SLOT: usize = MAX_DATA_SHREDS_PER_SLOT;
+
+const_assert_eq!(ShredCode::SIZE_OF_PAYLOAD, 1228);
+
+#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
+pub enum ShredCode {
+    Legacy(legacy::ShredCode),
+    Merkle(merkle::ShredCode),
+}
+
+impl ShredCode {
+    pub(super) const SIZE_OF_PAYLOAD: usize = PACKET_DATA_SIZE - SIZE_OF_NONCE;
+
+    dispatch!(fn coding_header(&self) -> &CodingShredHeader);
+
+    dispatch!(pub(super) fn common_header(&self) -> &ShredCommonHeader);
+    dispatch!(pub(super) fn erasure_shard(self) -> Result<Vec<u8>, Error>);
+    dispatch!(pub(super) fn erasure_shard_as_slice(&self) -> Result<&[u8], Error>);
+    dispatch!(pub(super) fn erasure_shard_index(&self) -> Result<usize, Error>);
+    dispatch!(pub(super) fn first_coding_index(&self) -> Option<u32>);
+    dispatch!(pub(super) fn into_payload(self) -> Vec<u8>);
+    dispatch!(pub(super) fn payload(&self) -> &Vec<u8>);
+    dispatch!(pub(super) fn sanitize(&self) -> Result<(), Error>);
+    dispatch!(pub(super) fn set_signature(&mut self, signature: Signature));
+
+    // Only for tests.
+    dispatch!(pub(super) fn set_index(&mut self, index: u32));
+    dispatch!(pub(super) fn set_slot(&mut self, slot: Slot));
+
+    pub(super) fn signed_data(&self) -> Result<SignedData, Error> {
+        match self {
+            Self::Legacy(shred) => Ok(SignedData::Chunk(shred.signed_data()?)),
+            Self::Merkle(shred) => Ok(SignedData::MerkleRoot(shred.signed_data()?)),
+        }
+    }
+
+    pub(super) fn merkle_root(&self) -> Result<Hash, Error> {
+        match self {
+            Self::Legacy(_) => Err(Error::InvalidShredType),
+            Self::Merkle(shred) => shred.merkle_root(),
+        }
+    }
+
+    pub(super) fn num_data_shreds(&self) -> u16 {
+        self.coding_header().num_data_shreds
+    }
+
+    pub(super) fn num_coding_shreds(&self) -> u16 {
+        self.coding_header().num_coding_shreds
+    }
+
+    // Returns true if the erasure coding of the two shreds mismatch.
+    pub(super) fn erasure_mismatch(&self, other: &ShredCode) -> bool {
+        match (self, other) {
+            (Self::Legacy(shred), Self::Legacy(other)) => erasure_mismatch(shred, other),
+            (Self::Legacy(_), Self::Merkle(_)) => true,
+            (Self::Merkle(_), Self::Legacy(_)) => true,
+            (Self::Merkle(shred), Self::Merkle(other)) => {
+                // Merkle shreds within the same erasure batch have the same
+                // merkle root. The root of the merkle tree is signed. So
+                // either the signatures match or one fails sigverify.
+                erasure_mismatch(shred, other) ||
+                    shred.common_header().signature != other.common_header().signature
+            },
+        }
+    }
+}
+
+impl From<legacy::ShredCode> for ShredCode {
+    fn from(shred: legacy::ShredCode) -> Self {
+        Self::Legacy(shred)
+    }
+}
+
+impl From<merkle::ShredCode> for ShredCode {
+    fn from(shred: merkle::ShredCode) -> Self {
+        Self::Merkle(shred)
+    }
+}
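+
+// Editor's note (illustration, not vendored code): in a batch with
+// num_data_shreds = 32 and num_coding_shreds = 32, erasure shards are indexed
+// 0..64; data shreds occupy 0..32 and a coding shred at position 7 maps to
+// shard index 32 + 7 = 39 in `erasure_shard_index` below.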
+#[inline]
+pub(super) fn erasure_shard_index<T: ShredCodeTrait>(shred: &T) -> Option<usize> {
+    // Assert that the last shred index in the erasure set does not
+    // overshoot MAX_{DATA,CODE}_SHREDS_PER_SLOT.
+    let common_header = shred.common_header();
+    let coding_header = shred.coding_header();
+    if common_header
+        .fec_set_index
+        .checked_add(u32::from(coding_header.num_data_shreds.checked_sub(1)?))? as usize >=
+        MAX_DATA_SHREDS_PER_SLOT
+    {
+        return None;
+    }
+    if shred
+        .first_coding_index()?
+        .checked_add(u32::from(coding_header.num_coding_shreds.checked_sub(1)?))? as usize >=
+        MAX_CODE_SHREDS_PER_SLOT
+    {
+        return None;
+    }
+    let num_data_shreds = usize::from(coding_header.num_data_shreds);
+    let num_coding_shreds = usize::from(coding_header.num_coding_shreds);
+    let position = usize::from(coding_header.position);
+    let fec_set_size = num_data_shreds.checked_add(num_coding_shreds)?;
+    let index = position.checked_add(num_data_shreds)?;
+    (index < fec_set_size).then_some(index)
+}
+
+pub(super) fn sanitize<T: ShredCodeTrait>(shred: &T) -> Result<(), Error> {
+    if shred.payload().len() != T::SIZE_OF_PAYLOAD {
+        return Err(Error::InvalidPayloadSize(shred.payload().len()));
+    }
+    let common_header = shred.common_header();
+    let coding_header = shred.coding_header();
+    if common_header.index as usize >= MAX_CODE_SHREDS_PER_SLOT {
+        return Err(Error::InvalidShredIndex(ShredType::Code, common_header.index));
+    }
+    let num_coding_shreds = usize::from(coding_header.num_coding_shreds);
+    if num_coding_shreds > 8 * DATA_SHREDS_PER_FEC_BLOCK {
+        return Err(Error::InvalidNumCodingShreds(coding_header.num_coding_shreds));
+    }
+    let _shard_index = shred.erasure_shard_index()?;
+    let _erasure_shard = shred.erasure_shard_as_slice()?;
+    Ok(())
+}
+
+pub(super) fn erasure_mismatch<T: ShredCodeTrait>(shred: &T, other: &T) -> bool {
+    let CodingShredHeader { num_data_shreds, num_coding_shreds, position: _ } =
+        shred.coding_header();
+    *num_coding_shreds != other.coding_header().num_coding_shreds ||
+        *num_data_shreds != other.coding_header().num_data_shreds ||
+        shred.first_coding_index() != other.first_coding_index()
+}
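+
+// Editor's note (illustration): with DATA_SHREDS_PER_FEC_BLOCK = 32, `sanitize`
+// above caps a batch at 8 * 32 = 256 coding shreds, so a CodingShredHeader
+// claiming num_coding_shreds = 300 is rejected as InvalidNumCodingShreds.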
diff --git a/light-clients/icsxx-cf-solana/src/solana/shred/shred_data.rs b/light-clients/icsxx-cf-solana/src/solana/shred/shred_data.rs
new file mode 100644
index 000000000..7e4c3554a
--- /dev/null
+++ b/light-clients/icsxx-cf-solana/src/solana/shred/shred_data.rs
@@ -0,0 +1,184 @@
+//! File source: solana/ledger/src/shred/shred_data.rs
+
+use crate::solana::{
+    blockstore::MAX_DATA_SHREDS_PER_SLOT,
+    shred::{
+        self,
+        common::dispatch,
+        legacy, merkle,
+        traits::{Shred as _, ShredData as ShredDataTrait},
+        DataShredHeader, ShredCommonHeader, ShredFlags, ShredType, ShredVariant, SignedData,
+    },
+    Error,
+};
+use alloc::vec::Vec;
+use serde::{Deserialize, Serialize};
+use solana_sdk::{clock::Slot, hash::Hash, signature::Signature};
+
+#[derive(Clone, Debug, Eq, PartialEq, Deserialize, Serialize)]
+pub enum ShredData {
+    Legacy(legacy::ShredData),
+    Merkle(merkle::ShredData),
+}
+
+impl ShredData {
+    dispatch!(fn data_header(&self) -> &DataShredHeader);
+
+    dispatch!(pub(super) fn common_header(&self) -> &ShredCommonHeader);
+    dispatch!(pub(super) fn data(&self) -> Result<&[u8], Error>);
+    dispatch!(pub(super) fn erasure_shard(self) -> Result<Vec<u8>, Error>);
+    dispatch!(pub(super) fn erasure_shard_as_slice(&self) -> Result<&[u8], Error>);
+    dispatch!(pub(super) fn erasure_shard_index(&self) -> Result<usize, Error>);
+    dispatch!(pub(super) fn into_payload(self) -> Vec<u8>);
+    dispatch!(pub(super) fn parent(&self) -> Result<Slot, Error>);
+    dispatch!(pub(super) fn payload(&self) -> &Vec<u8>);
+    dispatch!(pub(super) fn sanitize(&self) -> Result<(), Error>);
+    dispatch!(pub(super) fn set_signature(&mut self, signature: Signature));
+
+    // Only for tests.
+    dispatch!(pub(super) fn set_index(&mut self, index: u32));
+    dispatch!(pub(super) fn set_slot(&mut self, slot: Slot));
+
+    pub(super) fn signed_data(&self) -> Result<SignedData, Error> {
+        match self {
+            Self::Legacy(shred) => Ok(SignedData::Chunk(shred.signed_data()?)),
+            Self::Merkle(shred) => Ok(SignedData::MerkleRoot(shred.signed_data()?)),
+        }
+    }
+
+    pub(super) fn merkle_root(&self) -> Result<Hash, Error> {
+        match self {
+            Self::Legacy(_) => Err(Error::InvalidShredType),
+            Self::Merkle(shred) => shred.merkle_root(),
+        }
+    }
+
+    pub(super) fn new_from_data(
+        slot: Slot,
+        index: u32,
+        parent_offset: u16,
+        data: &[u8],
+        flags: ShredFlags,
+        reference_tick: u8,
+        version: u16,
+        fec_set_index: u32,
+    ) -> Self {
+        Self::from(legacy::ShredData::new_from_data(
+            slot,
+            index,
+            parent_offset,
+            data,
+            flags,
+            reference_tick,
+            version,
+            fec_set_index,
+        ))
+    }
+
+    pub(super) fn last_in_slot(&self) -> bool {
+        let flags = self.data_header().flags;
+        flags.contains(ShredFlags::LAST_SHRED_IN_SLOT)
+    }
+
+    pub(super) fn data_complete(&self) -> bool {
+        let flags = self.data_header().flags;
+        flags.contains(ShredFlags::DATA_COMPLETE_SHRED)
+    }
+
+    pub(super) fn reference_tick(&self) -> u8 {
+        let flags = self.data_header().flags;
+        (flags & ShredFlags::SHRED_TICK_REFERENCE_MASK).bits()
+    }
+
+    // Possibly trimmed payload.
+    // Should only be used when storing shreds to blockstore.
+    pub(super) fn bytes_to_store(&self) -> &[u8] {
+        match self {
+            Self::Legacy(shred) => shred.bytes_to_store(),
+            Self::Merkle(shred) => shred.payload(),
+        }
+    }
+
+    // Possibly zero pads bytes stored in blockstore.
+    pub(crate) fn resize_stored_shred(shred: Vec<u8>) -> Result<Vec<u8>, Error> {
+        match shred::layout::get_shred_variant(&shred)? {
+            ShredVariant::LegacyCode | ShredVariant::MerkleCode { .. } =>
+                Err(Error::InvalidShredType),
+            ShredVariant::MerkleData { .. } => {
+                if shred.len() != merkle::ShredData::SIZE_OF_PAYLOAD {
+                    return Err(Error::InvalidPayloadSize(shred.len()));
+                }
+                Ok(shred)
+            },
+            ShredVariant::LegacyData => legacy::ShredData::resize_stored_shred(shred),
+        }
+    }
+
+    // Maximum size of ledger data that can be embedded in a data-shred.
+    // merkle_proof_size is the number of merkle proof entries.
+    // None indicates a legacy data-shred.
+    pub fn capacity(
+        merkle_variant: Option<(
+            u8,   // proof_size
+            bool, // chained
+            bool, // resigned
+        )>,
+    ) -> Result<usize, Error> {
+        match merkle_variant {
+            None => Ok(legacy::ShredData::CAPACITY),
+            Some((proof_size, chained, resigned)) => {
+                debug_assert!(chained || !resigned);
+                merkle::ShredData::capacity(proof_size, chained, resigned)
+            },
+        }
+    }
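+
+    // Editor's note (illustration): for a fixed proof_size the chained variant
+    // reserves extra payload bytes for the chained merkle root and the
+    // resigned variant reserves further bytes for a retransmitter signature,
+    // so capacity(Some((n, true, true))) <= capacity(Some((n, true, false)))
+    // <= capacity(Some((n, false, false))).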
+
+    // Only for tests.
+    pub(super) fn set_last_in_slot(&mut self) {
+        match self {
+            Self::Legacy(shred) => shred.set_last_in_slot(),
+            Self::Merkle(_) => panic!("Not Implemented!"),
+        }
+    }
+}
+
+impl From<legacy::ShredData> for ShredData {
+    fn from(shred: legacy::ShredData) -> Self {
+        Self::Legacy(shred)
+    }
+}
+
+impl From<merkle::ShredData> for ShredData {
+    fn from(shred: merkle::ShredData) -> Self {
+        Self::Merkle(shred)
+    }
+}
+
+#[inline]
+pub(super) fn erasure_shard_index<T: ShredDataTrait>(shred: &T) -> Option<usize> {
+    let fec_set_index = shred.common_header().fec_set_index;
+    let index = shred.common_header().index.checked_sub(fec_set_index)?;
+    usize::try_from(index).ok()
+}
+
+pub(super) fn sanitize<T: ShredDataTrait>(shred: &T) -> Result<(), Error> {
+    if shred.payload().len() != T::SIZE_OF_PAYLOAD {
+        return Err(Error::InvalidPayloadSize(shred.payload().len()));
+    }
+    let common_header = shred.common_header();
+    let data_header = shred.data_header();
+    if common_header.index as usize >= MAX_DATA_SHREDS_PER_SLOT {
+        return Err(Error::InvalidShredIndex(ShredType::Data, common_header.index));
+    }
+    let flags = data_header.flags;
+    if flags.intersects(ShredFlags::LAST_SHRED_IN_SLOT) &&
+        !flags.contains(ShredFlags::DATA_COMPLETE_SHRED)
+    {
+        return Err(Error::InvalidShredFlags(data_header.flags.bits()));
+    }
+    let _data = shred.data()?;
+    let _parent = shred.parent()?;
+    let _shard_index = shred.erasure_shard_index()?;
+    let _erasure_shard = shred.erasure_shard_as_slice()?;
+    Ok(())
+}
diff --git a/light-clients/icsxx-cf-solana/src/solana/shred/traits.rs b/light-clients/icsxx-cf-solana/src/solana/shred/traits.rs
new file mode 100644
index 000000000..e0b95bd63
--- /dev/null
+++ b/light-clients/icsxx-cf-solana/src/solana/shred/traits.rs
@@ -0,0 +1,63 @@
+//! File source: solana/ledger/src/shred/traits.rs
+
+use crate::solana::shred::{CodingShredHeader, DataShredHeader, Error, ShredCommonHeader};
+use alloc::vec::Vec;
+use solana_sdk::{clock::Slot, signature::Signature};
+
+pub trait Shred<'a>: Sized {
+    // Total size of payload including headers, merkle
+    // branches (if any), zero paddings, etc.
+    const SIZE_OF_PAYLOAD: usize;
+    // Size of common and code/data headers.
+    const SIZE_OF_HEADERS: usize;
+
+    type SignedData: AsRef<[u8]>;
+
+    fn from_payload(shred: Vec<u8>) -> Result<Self, Error>;
+    fn common_header(&self) -> &ShredCommonHeader;
+    fn sanitize(&self) -> Result<(), Error>;
+
+    fn set_signature(&mut self, signature: Signature);
+
+    fn payload(&self) -> &Vec<u8>;
+    fn into_payload(self) -> Vec<u8>;
+
+    // Returns the shard index within the erasure coding set.
+    fn erasure_shard_index(&self) -> Result<usize, Error>;
+    // Returns the portion of the shred's payload which is erasure coded.
+    fn erasure_shard(self) -> Result<Vec<u8>, Error>;
+    // Like Shred::erasure_shard but returning a slice.
+    fn erasure_shard_as_slice(&self) -> Result<&[u8], Error>;
+
+    // Portion of the payload which is signed.
+    fn signed_data(&'a self) -> Result<Self::SignedData, Error>;
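+
+    // Editor's note: SignedData is borrowed for legacy shreds (a chunk of the
+    // payload) but owned for merkle shreds (the recomputed merkle root), which
+    // is why the trait carries a lifetime parameter instead of returning
+    // &[u8] directly.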
+
+    // Only for tests.
+    fn set_index(&mut self, index: u32);
+    fn set_slot(&mut self, slot: Slot);
+}
+
+pub(super) trait ShredData: for<'a> Shred<'a> {
+    fn data_header(&self) -> &DataShredHeader;
+
+    fn parent(&self) -> Result<Slot, Error> {
+        let slot = self.common_header().slot;
+        let parent_offset = self.data_header().parent_offset;
+        if parent_offset == 0 && slot != 0 {
+            return Err(Error::InvalidParentOffset { slot, parent_offset });
+        }
+        slot.checked_sub(Slot::from(parent_offset))
+            .ok_or(Error::InvalidParentOffset { slot, parent_offset })
+    }
+
+    fn data(&self) -> Result<&[u8], Error>;
+}
+
+pub(super) trait ShredCode: for<'a> Shred<'a> {
+    fn coding_header(&self) -> &CodingShredHeader;
+
+    fn first_coding_index(&self) -> Option<u32> {
+        let position = u32::from(self.coding_header().position);
+        self.common_header().index.checked_sub(position)
+    }
+}
diff --git a/light-clients/icsxx-cf-solana/src/solana/shredder.rs b/light-clients/icsxx-cf-solana/src/solana/shredder.rs
new file mode 100644
index 000000000..c761d0a0c
--- /dev/null
+++ b/light-clients/icsxx-cf-solana/src/solana/shredder.rs
@@ -0,0 +1,36 @@
+//! File source: solana/ledger/src/shredder.rs
+
+use crate::solana::{
+    shred::{Shred, ShredData},
+    Error,
+};
+use std::{prelude::rust_2015::Vec, vec};
+
+#[derive(Debug)]
+pub struct Shredder {}
+
+impl Shredder {
+    /// Combines all shreds to recreate the original buffer
+    pub fn deshred(shreds: &[&Shred]) -> Result<Vec<u8>, Error> {
+        let index = shreds.first().ok_or(Error::TooFewDataShards)?.index();
+        let aligned = shreds.iter().zip(index..).all(|(s, i)| s.index() == i);
+        let data_complete = {
+            let shred = shreds.last().unwrap();
+            shred.data_complete() || shred.last_in_slot()
+        };
+        if !data_complete || !aligned {
+            return Err(Error::TooFewDataShards);
+        }
+        let data: Vec<_> = shreds.iter().map(|s| s.data()).collect::<Result<Vec<_>, Error>>()?;
+        let data: Vec<_> = data.into_iter().flatten().copied().collect();
+        if data.is_empty() {
+            // For backward compatibility. This is needed when the data shred
+            // payload is None, so that deserializing to Vec<Entry> results in
+            // an empty vector.
+            let data_buffer_size = ShredData::capacity(/* merkle_variant: */ None).unwrap();
+            Ok(vec![0u8; data_buffer_size])
+        } else {
+            Ok(data)
+        }
+    }
+}
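+
+// Editor's note: a minimal usage sketch (hypothetical variables, not vendored
+// code) for `deshred`:
+//
+//     let shreds: Vec<Shred> = /* consecutive data shreds of one slot */;
+//     let refs: Vec<&Shred> = shreds.iter().collect();
+//     let payload = Shredder::deshred(&refs)?;
+//     let entries: Vec<Entry> = bincode::deserialize(&payload)?;
+//
+// `deshred` requires consecutively indexed shreds whose last element carries
+// DATA_COMPLETE_SHRED (or LAST_SHRED_IN_SLOT); otherwise it returns
+// Err(TooFewDataShards).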