From 1f404ac5664626fe0a189ab0f5183d0b5afefdb5 Mon Sep 17 00:00:00 2001 From: Konrad Date: Fri, 3 Jan 2025 10:17:47 +0100 Subject: [PATCH] feat: verify multiple partitions in pallets (#658) --- examples/deals.json | 14 --- examples/declare-fault.sh | 42 ------- examples/fault-declaration.json | 9 -- examples/pre-commit-sector.json | 10 -- examples/prove-commit-sector.json | 6 - examples/publish.sh | 9 -- examples/real-world-use-case-demo.sh | 117 -------------------- examples/rpc_publish.sh | 11 +- examples/rpc_publish_multiple_sectors.sh | 70 ++++++++++++ examples/start_sp.sh | 27 +++++ examples/windowed-proof.json | 9 -- pallets/proofs/src/crypto/groth16.rs | 12 +- pallets/proofs/src/lib.rs | 29 +++-- pallets/proofs/src/porep/mod.rs | 6 +- pallets/proofs/src/post/mod.rs | 50 ++++----- pallets/proofs/src/tests/post.rs | 7 +- pallets/randomness/src/lib.rs | 2 +- pallets/storage-provider/src/lib.rs | 44 +++++--- pallets/storage-provider/src/proofs.rs | 5 +- pallets/storage-provider/src/tests/mod.rs | 18 ++- primitives/src/lib.rs | 11 ++ primitives/src/pallets.rs | 15 ++- storage-provider/server/src/pipeline/mod.rs | 4 +- storagext/lib/artifacts/metadata.scale | Bin 168501 -> 168807 bytes storagext/lib/src/types/storage_provider.rs | 17 ++- 25 files changed, 243 insertions(+), 301 deletions(-) delete mode 100644 examples/deals.json delete mode 100755 examples/declare-fault.sh delete mode 100644 examples/fault-declaration.json delete mode 100644 examples/pre-commit-sector.json delete mode 100644 examples/prove-commit-sector.json delete mode 100755 examples/publish.sh delete mode 100755 examples/real-world-use-case-demo.sh create mode 100755 examples/rpc_publish_multiple_sectors.sh create mode 100755 examples/start_sp.sh delete mode 100644 examples/windowed-proof.json diff --git a/examples/deals.json b/examples/deals.json deleted file mode 100644 index fddb7e9bd..000000000 --- a/examples/deals.json +++ /dev/null @@ -1,14 +0,0 @@ -[ - { - "piece_cid": "bafk2bzacecg3xxc4f2ql2hreiuy767u6r72ekdz54k7luieknboaakhft5rgk", - "piece_size": 1, - "client": "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", - "provider": "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y", - "label": "dead", - "start_block": 30, - "end_block": 55, - "storage_price_per_block": 1, - "provider_collateral": 1, - "state": "Published" - } -] \ No newline at end of file diff --git a/examples/declare-fault.sh b/examples/declare-fault.sh deleted file mode 100755 index b5dfeb30d..000000000 --- a/examples/declare-fault.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/bash -xe - -cargo build -r -p storagext-cli - -# Export logging level and the storage provider key for simpler scripting -# export RUST_LOG=trace -export SR25519_KEY="//Charlie" - -# We start by creating two market accounts, to do that, we simply need to add money to the market balance account. -# We'll do that for Alice (our client) and for Charlie (our storage provider): - -# Alice, using an explicit key -target/release/storagext-cli --sr25519-key "//Alice" market add-balance 25100200300 - -# Charlie, with the implicit key, read from the SR25519_KEY environment variable -target/release/storagext-cli market add-balance 25100200300 - -# We still don't have a registered storage provider, so let's register Charlie; -# once again, we're using the SR25519_KEY environment variable. -target/release/storagext-cli storage-provider register charlie - -# We then register the deal between Alice and Charlie. 
-target/release/storagext-cli market publish-storage-deals --client-sr25519-key "//Alice" "@examples/deals.json" - -# The provider now needs to pre-commit the received data, -# if in 100 blocks (the `expiration` field) this data isn't proven, -# the storage provider will receive a penalty (get his funds slashed). -target/release/storagext-cli storage-provider pre-commit "@examples/pre-commit-sector.json" - -# Prove that we've properly stored the client's data. -target/release/storagext-cli storage-provider prove-commit "@examples/prove-commit-sector.json" - -# Let's now pretend that Charlie did an oopsie and the data the client trusted him has an issue, -# to avoid getting an harsh penalty, Charlie needs to assume his mistake by declaring a fault: -target/release/storagext-cli storage-provider declare-faults "@examples/fault-declaration.json" - -# In the meantime, Charlie undid his oopsie and can now say the sector is good for usage again: -target/release/storagext-cli storage-provider declare-faults-recovered "@examples/fault-declaration.json" - -# To fully undo his oopsie, Charlie needs to submit a new proof for the previously faulty sector, -# this ensures that Charlie isn't lying about the sector recovery! -target/release/storagext-cli storage-provider submit-windowed-post "@examples/windowed-proof.json" diff --git a/examples/fault-declaration.json b/examples/fault-declaration.json deleted file mode 100644 index 373ed3918..000000000 --- a/examples/fault-declaration.json +++ /dev/null @@ -1,9 +0,0 @@ -[ - { - "deadline": 0, - "partition": 0, - "sectors": [ - 1 - ] - } -] diff --git a/examples/pre-commit-sector.json b/examples/pre-commit-sector.json deleted file mode 100644 index aa47a8a14..000000000 --- a/examples/pre-commit-sector.json +++ /dev/null @@ -1,10 +0,0 @@ -[ - { - "sector_number": 1, - "sealed_cid": "bafk2bzaceajreoxfdcpdvitpvxm7vkpvcimlob5ejebqgqidjkz4qoug4q6zu", - "deal_ids": [0], - "expiration": 100, - "unsealed_cid": "bafk2bzaceajreoxfdcpdvitpvxm7vkpvcimlob5ejebqgqidjkz4qoug4q6zu", - "seal_proof": "StackedDRG2KiBV1P1" - } -] \ No newline at end of file diff --git a/examples/prove-commit-sector.json b/examples/prove-commit-sector.json deleted file mode 100644 index f405ebed5..000000000 --- a/examples/prove-commit-sector.json +++ /dev/null @@ -1,6 +0,0 @@ -[ - { - "sector_number": 1, - "proof": "1230deadbeef" - } -] \ No newline at end of file diff --git a/examples/publish.sh b/examples/publish.sh deleted file mode 100755 index b62423ca7..000000000 --- a/examples/publish.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash -xe -target/debug/storagext-cli --sr25519-key //Charlie storage-provider register charlie - -target/debug/storagext-cli --sr25519-key //Alice market add-balance 25100200300 -target/debug/storagext-cli --sr25519-key //Charlie market add-balance 25100200300 -RUST_LOG=trace target/debug/storagext-cli --sr25519-key //Charlie market publish-storage-deals --client-sr25519-key //Alice @examples/deals.json - -target/debug/storagext-cli --sr25519-key //Charlie storage-provider pre-commit @examples/pre-commit-sector.json -target/debug/storagext-cli --sr25519-key //Charlie storage-provider prove-commit @examples/prove-commit-sector.json diff --git a/examples/real-world-use-case-demo.sh b/examples/real-world-use-case-demo.sh deleted file mode 100755 index 44420116d..000000000 --- a/examples/real-world-use-case-demo.sh +++ /dev/null @@ -1,117 +0,0 @@ -#!/usr/bin/env bash -set -e - -export DISABLE_XT_WAIT_WARNING=1 - -if ! 
command -v storagext-cli >/dev/null 2>&1; then - echo "Make sure to follow https://eigerco.github.io/polka-storage-book/getting-started/local-testnet.html#native-binaries." - echo "This script relies on having a fresh testnet running and 'storagext-cli' in the PATH." - exit 1 -fi - -# Execute command with the descrption -execute() { - # Print description - echo "-- $1 --" - echo "Command: $2" - - # Execute command and print result - result=$(eval "$2") - - echo "Result: $result" - echo -} - -startup_validate() { - execute 'Wait until the chain starts' "storagext-cli system wait-for-height 1" - height=$(storagext-cli system get-height | awk '{print $3}') - if [[ $height -ne 1 ]]; then - echo "For this script to work, it needs to be run exactly at the second block. Current: $height" - exit 0 - fi -} - -startup_validate - -HUSKY_DEAL='[ - { - "piece_cid": "bafybeihxgc67fwhdoxo2klvmsetswdmwwz3brpwwl76qizbsl6ypro6vxq", - "piece_size": 1278, - "client": "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", - "provider": "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y", - "label": "My lovely Husky (husky.jpg)", - "start_block": 65, - "end_block": 115, - "storage_price_per_block": 500000000, - "provider_collateral": 12500000000, - "state": "Published" - } -]' -echo "$HUSKY_DEAL" > husky-deal.json - -PRE_COMMIT_HUSKY='{ - "sector_number": 1, - "sealed_cid": "bafk2bzaceajreoxfdcpdvitpvxm7vkpvcimlob5ejebqgqidjkz4qoug4q6zu", - "deal_ids": [0], - "expiration": 165, - "unsealed_cid": "bafk2bzaceajreoxfdcpdvitpvxm7vkpvcimlob5ejebqgqidjkz4qoug4q6zu", - "seal_proof": "StackedDRG2KiBV1P1" - -}' -echo "$PRE_COMMIT_HUSKY" > pre-commit-husky.json - -PROVE_COMMIT_HUSKY='{ - "sector_number": 1, - "proof": "beef" -}' -echo "$PROVE_COMMIT_HUSKY" > prove-commit-husky.json - -WINDOWED_POST='{ - "deadline": 0, - "partitions": [0], - "proof": { - "post_proof": "2KiB", - "proof_bytes": "beef" - } -}' -echo "$WINDOWED_POST" >windowed-post.json - -FAULT_DECLARATION='[ - { - "deadline": 0, - "partition": 0, - "sectors": [1] - } -] - -' -echo "$FAULT_DECLARATION" >fault-declaration.json - -PROVING_PERIOD_START=61 -FIRST_DEADLINE_END=81 -SECOND_DEADLINE_START=101 -DEAL_ID=0 -DEAL_END=115 - -execute "Registering Charlie as a storage provider" 'storagext-cli --sr25519-key "//Charlie" storage-provider register Charlie' -execute 'Adding balance to Alice`s account' 'storagext-cli --sr25519-key "//Alice" market add-balance 25000000000' -execute 'Adding balance to Charlie`s account' 'storagext-cli --sr25519-key "//Charlie" market add-balance 12500000000' -execute 'Publishing a storage deal' 'storagext-cli --sr25519-key "//Charlie" market publish-storage-deals --client-sr25519-key "//Alice" "@husky-deal.json"' -execute 'Pre-commit a sector' 'storagext-cli --sr25519-key "//Charlie" storage-provider pre-commit "@pre-commit-husky.json"' -execute 'Prove committed sector' 'storagext-cli --sr25519-key "//Charlie" storage-provider prove-commit "@prove-commit-husky.json"' - -execute 'Wait until the proving period starts' "storagext-cli system wait-for-height $PROVING_PERIOD_START" -execute 'Submitting windowed post' 'storagext-cli --sr25519-key "//Charlie" storage-provider submit-windowed-post "@windowed-post.json"' - -execute 'Wait until the first deadline passes' "storagext-cli system wait-for-height $FIRST_DEADLINE_END" -execute 'Submit fault declaration for the sector' 'storagext-cli --sr25519-key "//Charlie" storage-provider declare-faults "@fault-declaration.json"' -execute 'Declare faults recovered' 'storagext-cli --sr25519-key 
"//Charlie" storage-provider declare-faults-recovered "@fault-declaration.json"' - -execute 'Wait until the deadline to prove it' "storagext-cli system wait-for-height $SECOND_DEADLINE_START" -execute 'Submitting windowed post' 'storagext-cli --sr25519-key "//Charlie" storage-provider submit-windowed-post "@windowed-post.json"' - -execute 'Wait until the deal end' "storagext-cli system wait-for-height $DEAL_END" -execute 'Settle deal payments' "storagext-cli --sr25519-key //Charlie market settle-deal-payments $DEAL_ID" -execute "Withdraw balance from Charlie's account" 'storagext-cli --sr25519-key "//Charlie" market withdraw-balance 37500000000' - -echo 'Execution finished' diff --git a/examples/rpc_publish.sh b/examples/rpc_publish.sh index d06086184..d22693e42 100755 --- a/examples/rpc_publish.sh +++ b/examples/rpc_publish.sh @@ -36,9 +36,14 @@ target/release/storagext-cli --sr25519-key "$PROVIDER" market add-balance 250000 # register one of them as the storage provider wait -target/release/storagext-cli --sr25519-key "//Charlie" storage-provider register "peer_id" -target/release/storagext-cli --sr25519-key "//Charlie" proofs set-porep-verifying-key @2KiB.porep.vk.scale -target/release/storagext-cli --sr25519-key "//Charlie" proofs set-post-verifying-key @2KiB.post.vk.scale +# It's a test setup based on the local verifying keys, everyone can run those extrinsics currently. +# Each of the keys is different, because the processes are running in parallel. +# If they were running in parallel on the same account, they'd conflict with each other on the transaction nonce. +target/release/storagext-cli --sr25519-key "//Charlie" storage-provider register "peer_id" & +target/release/storagext-cli --sr25519-key "//Alice" proofs set-porep-verifying-key @2KiB.porep.vk.scale & +target/release/storagext-cli --sr25519-key "//Bob" proofs set-post-verifying-key @2KiB.post.vk.scale & + +wait DEAL_JSON=$( jq -n \ diff --git a/examples/rpc_publish_multiple_sectors.sh b/examples/rpc_publish_multiple_sectors.sh new file mode 100755 index 000000000..12a0a1db0 --- /dev/null +++ b/examples/rpc_publish_multiple_sectors.sh @@ -0,0 +1,70 @@ +#!/usr/bin/env bash +set -e + +if [ "$#" -ne 1 ]; then + echo "$0: input file required" + exit 1 +fi + +if [ -z "$1" ]; then + echo "$0: input file cannot be empty" + exit 1 +fi + +trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM EXIT + +# requires the testnet to be running! 
+export DISABLE_XT_WAIT_WARNING=1 + +CLIENT="//Alice" +PROVIDER="//Charlie" + +INPUT_FILE="$1" +INPUT_FILE_NAME="$(basename "$INPUT_FILE")" +INPUT_TMP_FILE="/tmp/$INPUT_FILE_NAME.car" + +target/release/mater-cli convert -q --overwrite "$INPUT_FILE" "$INPUT_TMP_FILE" && +INPUT_COMMP="$(target/release/polka-storage-provider-client proofs commp "$INPUT_TMP_FILE")" +PIECE_CID="$(echo "$INPUT_COMMP" | jq -r ".cid")" +PIECE_SIZE="$(echo "$INPUT_COMMP" | jq ".size")" + + +for i in $(seq 194 200); +do + DEAL_JSON=$( + jq -n \ + --arg piece_cid "$PIECE_CID" \ + --argjson start_block "$i" \ + --argjson piece_size "$PIECE_SIZE" \ + '{ + "piece_cid": $piece_cid, + "piece_size": $piece_size, + "client": "5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY", + "provider": "5FLSigC9HGRKVhB9FiEo4Y3koPsNmBmLJbpXg2mp1hXcS59Y", + "label": "", + "start_block": $start_block, + "end_block": 250, + "storage_price_per_block": 500, + "provider_collateral": 1250, + "state": "Published" + }' + ) + SIGNED_DEAL_JSON="$(RUST_LOG=error target/release/polka-storage-provider-client sign-deal --sr25519-key "$CLIENT" "$DEAL_JSON")" + + DEAL_CID="$(RUST_LOG=error target/release/polka-storage-provider-client propose-deal "$DEAL_JSON")" + echo "-------------------------- Uploading deal $i..." + echo + curl -X PUT -F "upload=@$INPUT_FILE" "http://localhost:8001/upload/$DEAL_CID" + + echo + echo "-------------------------- Publishing deal $i..." + target/release/polka-storage-provider-client publish-deal "$SIGNED_DEAL_JSON" & + # If we try to prove commit 6 in a single row then we're done. + # we need to throttle prove commits. + # Sleeping until polka-storage#655 is done. + sleep 6 + +done + +# wait until user Ctrl+Cs so that the commitment can actually be calculated +wait diff --git a/examples/start_sp.sh b/examples/start_sp.sh new file mode 100755 index 000000000..73004f5b3 --- /dev/null +++ b/examples/start_sp.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +set -e + +trap "trap - SIGTERM && kill -- -$$" SIGINT SIGTERM EXIT + +# requires the testnet to be running! +export DISABLE_XT_WAIT_WARNING=1 + +CLIENT="//Alice" +PROVIDER="//Charlie" + +# Setup balances +RUST_LOG=debug target/release/storagext-cli --sr25519-key "$CLIENT" market add-balance 250000000000 & +RUST_LOG=debug target/release/storagext-cli --sr25519-key "$PROVIDER" market add-balance 250000000000 & +# We can process a transaction by charlie and alice, but we can't in the same transaction +# register one of them as the storage provider +wait + +# It's a test setup based on the local verifying keys, everyone can run those extrinsics currently. +# Each of the keys is different, because the processes are running in parallel. +# If they were running in parallel on the same account, they'd conflict with each other on the transaction nonce. 
+RUST_LOG=debug target/release/storagext-cli --sr25519-key "//Charlie" storage-provider register "peer_id" & +RUST_LOG=debug target/release/storagext-cli --sr25519-key "//Alice" proofs set-porep-verifying-key @2KiB.porep.vk.scale & +RUST_LOG=debug target/release/storagext-cli --sr25519-key "//Bob" proofs set-post-verifying-key @2KiB.post.vk.scale & +wait + +RUST_LOG=debug target/release/polka-storage-provider-server --sr25519-key "$PROVIDER" --seal-proof "2KiB" --post-proof "2KiB" --porep-parameters 2KiB.porep.params --post-parameters 2KiB.post.params diff --git a/examples/windowed-proof.json b/examples/windowed-proof.json deleted file mode 100644 index 0add0a5ea..000000000 --- a/examples/windowed-proof.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "deadline": 0, - "partition": 0, - "proof": { - "sector_number": 1, - "proof_bytes": "1230deadbeef", - "post_proof": "StackedDRGWindow2KiBV1P1" - } -} diff --git a/pallets/proofs/src/crypto/groth16.rs b/pallets/proofs/src/crypto/groth16.rs index 7535b7e8f..81daaa5d1 100644 --- a/pallets/proofs/src/crypto/groth16.rs +++ b/pallets/proofs/src/crypto/groth16.rs @@ -15,7 +15,7 @@ use crate::Vec; /// - /// - #[derive(Clone, Decode, Default, Encode)] -struct PreparedVerifyingKey { +pub(crate) struct PreparedVerifyingKey { pub alpha_g1_beta_g2: E::Gt, pub neg_gamma_g2: E::G2Prepared, pub neg_delta_g2: E::G2Prepared, @@ -36,11 +36,13 @@ impl From> for PreparedVerifyingKey { } } -/// Method generates the `PreparedVerifyingKey` from the `VerifyingKey`. +/// Generates the `PreparedVerifyingKey` from the `VerifyingKey`. /// /// References: /// - -fn prepare_verifying_key(vkey: VerifyingKey) -> PreparedVerifyingKey { +pub(crate) fn prepare_verifying_key( + vkey: VerifyingKey, +) -> PreparedVerifyingKey { PreparedVerifyingKey::::from(vkey) } @@ -52,12 +54,10 @@ fn prepare_verifying_key(vkey: VerifyingKey) -> PreparedV /// - /// - pub fn verify_proof( - vk: VerifyingKey, + pvk: &PreparedVerifyingKey, proof: &Proof, public_inputs: &[E::Fr], ) -> Result<(), VerificationError> { - let pvk = prepare_verifying_key(vk); - if (public_inputs.len() + 1) != pvk.ic.len() { return Err(VerificationError::InvalidVerifyingKey); } diff --git a/pallets/proofs/src/lib.rs b/pallets/proofs/src/lib.rs index ffbc9f11d..1a721df28 100644 --- a/pallets/proofs/src/lib.rs +++ b/pallets/proofs/src/lib.rs @@ -31,7 +31,7 @@ pub mod pallet { pallets::ProofVerification, proofs::{ProverId, PublicReplicaInfo, RegisteredPoStProof, RegisteredSealProof, Ticket}, sector::SectorNumber, - MAX_POST_PROOF_BYTES, MAX_SEAL_PROOF_BYTES, MAX_SECTORS_PER_PROOF, + MAX_POST_PROOF_BYTES, MAX_PROOFS_PER_BLOCK, MAX_REPLICAS_PER_BLOCK, MAX_SEAL_PROOF_BYTES, }; use crate::{ @@ -165,12 +165,15 @@ pub mod pallet { replicas: BoundedBTreeMap< SectorNumber, PublicReplicaInfo, - ConstU32, + ConstU32, + >, + proofs: BoundedVec< + BoundedVec>, + ConstU32, >, - proof: BoundedVec>, ) -> DispatchResult { let replica_count = replicas.len(); - ensure!(replica_count <= post_type.sector_count(), { + ensure!(replica_count <= post_type.sector_count() * proofs.len(), { log::error!( target: LOG_TARGET, "Got more replicas than expected. 
Expected max replicas = {}, submitted replicas = {replica_count}", @@ -178,15 +181,23 @@ pub mod pallet { ); Error::::InvalidPoStProof }); - let proof = Proof::::decode(&mut proof.as_slice()).map_err(|e| { - log::error!(target: LOG_TARGET, "failed to parse PoSt proof {:?}", e); - Error::::Conversion - })?; + let mut parsed_proofs = BoundedVec::new(); + for (index, proof) in proofs.into_iter().enumerate() { + let proof = Proof::::decode(&mut proof.as_slice()).map_err(|e| { + log::error!(target: LOG_TARGET, "failed to parse PoSt proof (idx: {}){:?}", index, e); + Error::::Conversion + })?; + + parsed_proofs.try_push(proof).expect( + "internal (post::ProofScheme) and external (ProofVerification) apis have the same limits on number of proofs", + ); + } + let proof_scheme = post::ProofScheme::setup(post_type); let vkey = PoStVerifyingKey::::get().ok_or(Error::::MissingPoStVerifyingKey)?; proof_scheme - .verify(randomness, replicas.clone(), vkey, proof) + .verify(randomness, replicas.clone(), vkey, parsed_proofs) .map_err(|e| { log::warn!(target: LOG_TARGET, "failed to verify PoSt proof: {:?}, for replicas: {:?}", e, replicas); Error::::InvalidPoStProof diff --git a/pallets/proofs/src/porep/mod.rs b/pallets/proofs/src/porep/mod.rs index ab4c1afee..de0c82e31 100644 --- a/pallets/proofs/src/porep/mod.rs +++ b/pallets/proofs/src/porep/mod.rs @@ -10,7 +10,8 @@ use sha2::{Digest, Sha256}; use crate::{ crypto::groth16::{ - verify_proof, Bls12, Fr, PrimeField, Proof, VerificationError, VerifyingKey, + prepare_verifying_key, verify_proof, Bls12, Fr, PrimeField, Proof, VerificationError, + VerifyingKey, }, fr32, graphs::{ @@ -160,8 +161,9 @@ impl ProofScheme { }; let public_inputs = self.generate_public_inputs(public_inputs, None)?; + let pvk = prepare_verifying_key(vk); - verify_proof(vk, proof, public_inputs.as_slice()).map_err(Into::::into) + verify_proof(&pvk, proof, public_inputs.as_slice()).map_err(Into::::into) } /// References: diff --git a/pallets/proofs/src/post/mod.rs b/pallets/proofs/src/post/mod.rs index 22107ebb3..9787b52c5 100644 --- a/pallets/proofs/src/post/mod.rs +++ b/pallets/proofs/src/post/mod.rs @@ -7,12 +7,14 @@ use primitives::{ commitment::RawCommitment, proofs::{PublicReplicaInfo, RegisteredPoStProof, Ticket}, sector::SectorNumber, - MAX_SECTORS_PER_PROOF, NODE_SIZE, + MAX_PROOFS_PER_BLOCK, MAX_REPLICAS_PER_BLOCK, NODE_SIZE, }; use sha2::{Digest, Sha256}; use crate::{ - crypto::groth16::{verify_proof, Bls12, Fr, Proof, VerificationError, VerifyingKey}, + crypto::groth16::{ + prepare_verifying_key, verify_proof, Bls12, Fr, Proof, VerificationError, VerifyingKey, + }, fr32, Vec, }; @@ -40,9 +42,13 @@ impl ProofScheme { pub fn verify( &self, randomness: Ticket, - replicas: BoundedBTreeMap>, + replicas: BoundedBTreeMap< + SectorNumber, + PublicReplicaInfo, + ConstU32, + >, vk: VerifyingKey, - proof: Proof, + proofs: BoundedVec, ConstU32>, ) -> Result<(), ProofError> { let randomness = fr32::bytes_into_fr(&randomness) .map_err(|_| ProofError::Conversion)? @@ -54,26 +60,13 @@ impl ProofScheme { ) .unwrap_or(1); - if required_partitions != 1 { - // We don't support more than 1 partition in this method right now. + // Proof per partition + if proofs.len() != required_partitions { + log::error!(target: LOG_TARGET, "Expected 1 proof per 1 partition, got {} proofs, {} partitions", + proofs.len(), required_partitions); return Err(ProofError::InvalidNumberOfProofs); } - // NOTE: - // * This is checked after the required partitions on purpose! 
- // * Once we support verification of multiple partitions this check should be done for every partition - let replica_count = replicas.len(); - ensure!( - replica_count <= self.config.challenged_sectors_per_partition, - { - log::error!( - target: LOG_TARGET, - "Got more replicas than expected. Expected max replicas = {}, submitted replicas = {replica_count}", - self.config.challenged_sectors_per_partition - ); - ProofError::InvalidNumberOfReplicas - } - ); let pub_sectors: Vec<_> = replicas .iter() .map(|(sector_id, replica)| { @@ -89,9 +82,16 @@ impl ProofScheme { randomness, sectors: pub_sectors, }; + let pvk = prepare_verifying_key(vk); + + for partition_index in 0..proofs.len() { + let inputs = + self.generate_public_inputs(public_inputs.clone(), Some(partition_index))?; + verify_proof(&pvk, &proofs[partition_index], inputs.as_slice()).inspect_err(|_| { + log::error!(target: LOG_TARGET, "failed to verify partition {}", partition_index); + })?; + } - let inputs = self.generate_public_inputs(public_inputs, None)?; - verify_proof(vk, &proof, inputs.as_slice())?; Ok(()) } @@ -160,8 +160,6 @@ impl ProofScheme { pub enum ProofError { InvalidNumberOfSectors, InvalidNumberOfProofs, - /// Returned when the given replicas exceeds the maximum amount set by the SP. - InvalidNumberOfReplicas, /// Returned when the given proof was invalid in a verification. InvalidProof, /// Returned when the given verifying key was invalid. @@ -179,11 +177,13 @@ impl From for ProofError { } } +#[derive(Clone)] struct PublicInputs { randomness: RawCommitment, sectors: Vec, } +#[derive(Clone)] struct PublicSector { id: SectorNumber, comm_r: Fr, diff --git a/pallets/proofs/src/tests/post.rs b/pallets/proofs/src/tests/post.rs index 9d10095cf..ad20378ec 100644 --- a/pallets/proofs/src/tests/post.rs +++ b/pallets/proofs/src/tests/post.rs @@ -10,6 +10,7 @@ use primitives::{ }; use rand::SeedableRng; use rand_xorshift::XorShiftRng; +use sp_core::bounded_vec; use sp_runtime::{BoundedBTreeMap, BoundedVec}; use sp_std::collections::btree_map::BTreeMap; @@ -44,7 +45,7 @@ fn post_verification_succeeds() { post_type, randomness, BoundedBTreeMap::try_from(replicas).expect("replicas should be valid"), - BoundedVec::try_from(proof_bytes).expect("proof_bytes should be valid"), + bounded_vec![BoundedVec::try_from(proof_bytes).expect("proof_bytes should be valid")], )); }); } @@ -66,7 +67,9 @@ fn post_verification_fails() { post_type, randomness, BoundedBTreeMap::try_from(replicas).expect("replicas should be valid"), - BoundedVec::try_from(proof_bytes).expect("proof_bytes should be valid"), + bounded_vec![ + BoundedVec::try_from(proof_bytes).expect("proof_bytes should be valid") + ], ), Error::::InvalidPoStProof ); diff --git a/pallets/randomness/src/lib.rs b/pallets/randomness/src/lib.rs index dec8b5b03..cd65493a9 100644 --- a/pallets/randomness/src/lib.rs +++ b/pallets/randomness/src/lib.rs @@ -71,7 +71,7 @@ pub mod pallet { #[pallet::storage] #[pallet::getter(fn author_vrf_history)] pub type AuthorVrfHistory = - CountedStorageMap<_, Blake2_128, BlockNumberFor, T::Hash>; + CountedStorageMap<_, Blake2_128Concat, BlockNumberFor, T::Hash>; #[pallet::call] impl Pallet { diff --git a/pallets/storage-provider/src/lib.rs b/pallets/storage-provider/src/lib.rs index 325a9385b..de1b7724f 100644 --- a/pallets/storage-provider/src/lib.rs +++ b/pallets/storage-provider/src/lib.rs @@ -372,6 +372,9 @@ pub mod pallet { CannotTerminateImmutableDeadline, /// Emitted when trying to submit PoSt with partitions containing too many sectors (>2349). 
TooManyReplicas, + /// SubmitWindowedPoSt must accept the same number of proofs as ProofVerification trait. + /// Internal error, should not happen. + TooManyProofs, /// AuthorVRF lookup failed. MissingAuthorVRF, /// Inner pallet errors @@ -683,27 +686,27 @@ pub mod pallet { let mut sp = StorageProviders::::try_get(&owner) .map_err(|_| Error::::StorageProviderNotFound)?; - // Ensure proof matches the expected kind - ensure!( - windowed_post.proof.post_proof == sp.info.window_post_proof_type, - { + for (idx, proof) in windowed_post.proofs.iter().enumerate() { + // Ensure proof matches the expected kind + ensure!(proof.post_proof == sp.info.window_post_proof_type, { log::error!( target: LOG_TARGET, - "submit_window_post: expected PoSt type {:?} but received {:?} instead", + "submit_window_post: idx: {}, expected PoSt type {:?} but received {:?} instead", + idx, sp.info.window_post_proof_type, - windowed_post.proof.post_proof + proof.post_proof ); Error::::InvalidProofType - } - ); + }); - ensure!( - windowed_post.proof.proof_bytes.len() <= primitives::MAX_POST_PROOF_BYTES as usize, - { - log::error!("submit_window_post: invalid proof size"); - Error::::PoStProofInvalid - } - ); + ensure!( + proof.proof_bytes.len() <= primitives::MAX_POST_PROOF_BYTES as usize, + { + log::error!("submit_window_post: invalid proof size"); + Error::::PoStProofInvalid + } + ); + } // If the proving period is in the future, we can't submit a proof yet // Related issue: https://github.com/filecoin-project/specs-actors/issues/946 @@ -799,11 +802,18 @@ pub mod pallet { &entropy, )?; + let mut proofs = BoundedVec::new(); + for proof in windowed_post.proofs { + proofs + .try_push(proof.proof_bytes) + .map_err(|_| Error::::TooManyProofs)?; + } + T::ProofVerification::verify_post( - windowed_post.proof.post_proof, + sp.info.window_post_proof_type, randomness, replicas, - windowed_post.proof.proof_bytes, + proofs, )?; log::debug!(target: LOG_TARGET, "submit_windowed_post: proof recorded"); diff --git a/pallets/storage-provider/src/proofs.rs b/pallets/storage-provider/src/proofs.rs index 51a320a63..d0cb56c0b 100644 --- a/pallets/storage-provider/src/proofs.rs +++ b/pallets/storage-provider/src/proofs.rs @@ -4,7 +4,8 @@ use frame_support::{ sp_runtime::BoundedVec, }; use primitives::{ - proofs::RegisteredPoStProof, PartitionNumber, MAX_PARTITIONS_PER_DEADLINE, MAX_POST_PROOF_BYTES, + proofs::RegisteredPoStProof, PartitionNumber, MAX_PARTITIONS_PER_DEADLINE, + MAX_POST_PROOF_BYTES, MAX_PROOFS_PER_BLOCK, }; use scale_info::TypeInfo; use sp_core::blake2_64; @@ -29,7 +30,7 @@ pub struct SubmitWindowedPoStParams { /// The partition being proven. pub partitions: BoundedVec>, /// The proof submission. - pub proof: PoStProof, + pub proofs: BoundedVec>, } /// Error type for proof operations. 
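
Note: with the change to pallets/storage-provider/src/proofs.rs above, SubmitWindowedPoStParams now carries one PoStProof per proven partition (bounded by MAX_PROOFS_PER_BLOCK) instead of a single proof. A minimal construction sketch, assuming the pallet-side types from this file are in scope; the generic bounds are elided as in the hunk above and the proof bytes are placeholders, not a valid proof:

    use sp_core::bounded_vec;

    // Two partitions proven in one extrinsic => two proofs, one per partition.
    let params = SubmitWindowedPoStParams {
        deadline: 0,
        partitions: bounded_vec![0, 1],
        proofs: bounded_vec![
            PoStProof {
                post_proof: RegisteredPoStProof::StackedDRGWindow2KiBV1P1,
                proof_bytes: bounded_vec![0xbe, 0xef], // placeholder bytes
            },
            PoStProof {
                post_proof: RegisteredPoStProof::StackedDRGWindow2KiBV1P1,
                proof_bytes: bounded_vec![0xbe, 0xef], // placeholder bytes
            },
        ],
    };

The storage-provider pallet then checks each entry's post_proof type and proof_bytes length before handing the whole batch to ProofVerification::verify_post, as shown in the lib.rs hunk above.
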
diff --git a/pallets/storage-provider/src/tests/mod.rs b/pallets/storage-provider/src/tests/mod.rs index 04812e01f..e5e36653a 100644 --- a/pallets/storage-provider/src/tests/mod.rs +++ b/pallets/storage-provider/src/tests/mod.rs @@ -20,7 +20,8 @@ use primitives::{ proofs::{ProverId, PublicReplicaInfo, RegisteredPoStProof, RegisteredSealProof, Ticket}, sector::SectorNumber, DealId, PartitionNumber, CID_SIZE_IN_BYTES, MAX_DEALS_PER_SECTOR, MAX_PARTITIONS_PER_DEADLINE, - MAX_POST_PROOF_BYTES, MAX_SEAL_PROOF_BYTES, MAX_SECTORS_PER_PROOF, MAX_TERMINATIONS_PER_CALL, + MAX_POST_PROOF_BYTES, MAX_PROOFS_PER_BLOCK, MAX_REPLICAS_PER_BLOCK, MAX_SEAL_PROOF_BYTES, + MAX_TERMINATIONS_PER_CALL, }; use sp_arithmetic::traits::Zero; use sp_core::{bounded_vec, Pair}; @@ -85,7 +86,9 @@ impl pallet_balances::Config for Test { pub const INVALID_PROOF: [u8; 2] = [0xd, 0xe]; -/// This is dummy proofs pallet implementation. All proofs are accepted as valid +/// This is dummy proofs pallet implementation. +/// All PoRep proofs are accepted as valid. +/// All PoSt proofs are accepted as valid unless first of them is [`INVALID_PROOF`]. pub struct DummyProofsVerification; impl ProofVerification for DummyProofsVerification { fn verify_porep( @@ -107,11 +110,14 @@ impl ProofVerification for DummyProofsVerification { _replicas: BoundedBTreeMap< SectorNumber, PublicReplicaInfo, - ConstU32, + ConstU32, + >, + proofs: BoundedVec< + BoundedVec>, + ConstU32, >, - proof: BoundedVec>, ) -> sp_runtime::DispatchResult { - if *proof == INVALID_PROOF { + if *proofs[0] == INVALID_PROOF { return Err(sp_runtime::DispatchError::Other("invalid proof")); } Ok(()) @@ -551,7 +557,7 @@ impl SubmitWindowedPoStBuilder { SubmitWindowedPoStParams { deadline: self.deadline, partitions: self.partitions, - proof: self.proof, + proofs: bounded_vec![self.proof], } } } diff --git a/primitives/src/lib.rs b/primitives/src/lib.rs index d196f3749..da066eccc 100644 --- a/primitives/src/lib.rs +++ b/primitives/src/lib.rs @@ -17,6 +17,14 @@ pub const NODE_SIZE: usize = 32; /// ref: pub const MAX_PARTITIONS_PER_DEADLINE: u32 = 3000; +/// Establishes how many partitions can we verify in a single extrinsic. +/// It's determined by the timing limitations, storage provider have an upper limit of 3000 partitions per deadline. +/// With our current verification solution, it'll take around ~30 extrinsic calls to verify all of them. +/// Verification of a single proof takes around ~100ms, block time is ~6000ms. +/// This means 10 partitions will be verified in a ~1 sec. +// TODO(@th7nder,#659,27/12/2024): possibly speed it up +pub const MAX_PROOFS_PER_BLOCK: u32 = 10; + /// Max number of sectors. /// pub const MAX_SECTORS: u32 = 32 << 20; @@ -50,6 +58,9 @@ pub const MAX_TERMINATIONS_PER_CALL: u32 = 32; // TODO(@jmg-duarte,25/07/2024): /// * Filecoin docs about PoSt: pub const MAX_SECTORS_PER_PROOF: u32 = 2349; +/// The maximum amount of replicas that can be processed in a single block. +pub const MAX_REPLICAS_PER_BLOCK: u32 = MAX_SECTORS_PER_PROOF * MAX_PROOFS_PER_BLOCK; + /// The absolute maximum length, in bytes, a seal proof should be for the largest sector size. /// NOTE: Taken the value from `StackedDRG32GiBV1`, /// which is not the biggest seal proof type but we do not plan on supporting non-interactive proof types at this time. 
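
Note: rough arithmetic behind the new limits in the primitives/src/lib.rs hunk above. At ~100 ms per proof verification and a ~6000 ms block time, 10 proofs per extrinsic amount to roughly one second of verification work, and covering the full 3000-partition deadline ceiling at that rate takes 3000 / 10 = 300 submit_windowed_post calls. A compile-time sanity-check sketch, assuming it sits next to those constants:

    // Illustrative only: re-derives the numbers quoted in the doc comments above.
    const _: () = assert!(MAX_PROOFS_PER_BLOCK == 10);
    const _: () = assert!(MAX_REPLICAS_PER_BLOCK == MAX_SECTORS_PER_PROOF * MAX_PROOFS_PER_BLOCK); // 2349 * 10 = 23_490
    const _: () = assert!(MAX_PARTITIONS_PER_DEADLINE / MAX_PROOFS_PER_BLOCK == 300); // extrinsics to cover a full deadline
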
diff --git a/primitives/src/pallets.rs b/primitives/src/pallets.rs index aa5eeefca..3cd3ab9ef 100644 --- a/primitives/src/pallets.rs +++ b/primitives/src/pallets.rs @@ -9,8 +9,8 @@ use crate::{ proofs::{ProverId, PublicReplicaInfo, RegisteredPoStProof, RegisteredSealProof, Ticket}, sector::SectorNumber, DealId, PartitionNumber, MAX_DEALS_PER_SECTOR, MAX_PARTITIONS_PER_DEADLINE, - MAX_POST_PROOF_BYTES, MAX_SEAL_PROOF_BYTES, MAX_SECTORS, MAX_SECTORS_PER_CALL, - MAX_SECTORS_PER_PROOF, + MAX_POST_PROOF_BYTES, MAX_PROOFS_PER_BLOCK, MAX_REPLICAS_PER_BLOCK, MAX_SEAL_PROOF_BYTES, + MAX_SECTORS, MAX_SECTORS_PER_CALL, }; pub trait StorageProviderValidation { @@ -34,8 +34,15 @@ pub trait ProofVerification { fn verify_post( post_type: RegisteredPoStProof, randomness: Ticket, - replicas: BoundedBTreeMap>, - proof: BoundedVec>, + replicas: BoundedBTreeMap< + SectorNumber, + PublicReplicaInfo, + ConstU32, + >, + proof: BoundedVec< + BoundedVec>, + ConstU32, + >, ) -> DispatchResult; } diff --git a/storage-provider/server/src/pipeline/mod.rs b/storage-provider/server/src/pipeline/mod.rs index 8600581ee..f9df2e1ed 100644 --- a/storage-provider/server/src/pipeline/mod.rs +++ b/storage-provider/server/src/pipeline/mod.rs @@ -738,10 +738,10 @@ async fn submit_windowed_post( SubmitWindowedPoStParams { deadline: deadline_index, partitions: partitions, - proof: PoStProof { + proofs: vec![PoStProof { post_proof: state.server_info.post_proof, proof_bytes: proof, - }, + }], }, true, ) diff --git a/storagext/lib/artifacts/metadata.scale b/storagext/lib/artifacts/metadata.scale index 39b8c3a0404295b8f94db0dbf30fdf7fc4c388f4..f336e00cde96f5492f1158c613ed86855ae4c051 100644 GIT binary patch delta 4664 zcmZu!eNf@lEIs1XaovtojLsg+I%y}|`vd>`;W6r5^A zYZP5fVqVFXR+BhN)*=as)H*k2N*mHeTsDg~8g=R)&V=bW1``@hg2ri0XP^6?1Tbse zb?&+6?DO0E{LVgm?|b^hgzK+NP&|52n-t%z{Qd$|gwJ32XK1CiGW`07cIl7|J>lg3 zABEE{+M!JhpL+e`+ee_psBM5!fDOjmes~R_+^7jcxg|fL-tY6(1af7#xXKsuR_oOp z^{N%czOrDkm`Jnd`MiPPhDDhx3UfDA7MB(ktuCu9c9vGIdZH-r@s%qXTPBBl%hFM4 zgrxAhWsk%=WvPrYi51F_Ah6bxj1!X^p%MzPxe*re0&D%ChL6x%^kw^dtg<9ONRWSnwqLQ&E+j~)q1sH$geZz!Z-Fn3V85OdtfHL{<;V3 z=?yaE=vCDLZTa$VL^)#~GT)NJ7_+hl(aklxAsKz|!8F|bQ&<5__|i{lQ|4X;V2Tb+Tu0=OE zG)nz7*&5EhJ{>=J9v+23w1%P7G8i4(76!+ZAz|M&ncXrCb8x4H4dcl$JYl&T6-)dX zJjw6L;hgI;jbHo>HUkF*|K0{ZE-P670;EC={^AAL19A8;!hA9M1j3x9F^ZHBIYwdi zLFUe}PvN**W+gRnnWP~aB8-%q}<4HoY+TuA&6usE= zc{Y}$h&wqqS&7i)P`8w!oB4Agc?7FnyL7pZpv) zTQT?K3HS-P za7_oKahEV_mBpjPV)1c^H$Lcq=Pl5H=M9)1uG|)f|I!IADWp(1YJC+PXnz%4(1HhE zh0|~pb6$gm@pl!-y68o?TQ^=)_&=lSF4vaHkGKB%cpAmj%mtw%{ zkN{abl&H@ZYV}s>afmJ=$89F-&4%w)7(H~pg}g~&E#p+jzHI~>DL-heFhcN4Sr127}LBdT(E)M0jv zta=1G@#LH2P1kV#N$|iB{@+O`#Q!}>1epCs-Z`>~)R?X)*p-O=T_%okqZ@Vubff1K zWJ%`~=)#33;i0e*%17HNu*zw~c6UVO!e7Co(rJaP6(YFrS1^~K9#Jr7Vmh6}nW&PU zh>|lSE93Cd({LiQXBBN$F=4cJ3Q|T=Q=b3;iZ=W&gL}w2L@& zUf_s^Us4!70*5B9jFY#KtzF`9t_pe4@BxL9q~2x^!z6&o5c*o>mM(*PB>o`tx_Zrxu-aa05#8!gEeQFB;m?@-v?aRTWiP%bd< z2^Q0!-uT*ptuaq$t@rr~HE)wyGz3_pwFJsSn?0`J4_w}A-!{EkR1|hkC=j%3RaJU@ z&>pPQ?E%fB+r4J~v-@g9mZnm~t_AES%tqbss&Q3mBFWig#py!kj`^AmH-W=7xfGKL5tj)oi--;7c^%U2scWRz-r}fSG3@ zdr?;83?{1Mve86!Y&Oaw)@}*b;?(;wB61SGv!H&LV#(NK7O#pUXN45e@%gim%9B)4 z&=?ob!o3(s#tY{m@1Gnh*uS5VTT|nrbOGPpOeJAuZN|5bW~K<)%=@zTTFv7j;qk-O zx;uh_^%r1j{@6uUSJeU)vP405Un|{02U%)Kr;S-zis=5>>@J`#r>P7T>-WJ_{OAJQ z=8IJ<-cNPj=O02NU#8+8_tVVoi-6O!NnDzpt2)&JwNNcl%hd{1Q+1W);`gr8io#2f zj`C$Xj>DH=KCTkI|GWfqDXy6F_RBC6S6-*lipwx(!Uh#$oqmn64S4GYMfZQb3`cB~ zOGwrU6_F(`U8m5d;XBu19q2geV_1We24N+(eGJ=E8r0D_Iix<2lS5=M<(m-h`WWJ| 
za1fFxxNjMx3ZQA^d{dKls1fgIR$-}`Dk#jacs$L%k$Bq?S-Ro|Y{+Q7?<~7Gqm+EX&XFD5%ewR1ks)BZvirs`dU8~mpZ7L+J zF?$T=>GwqfN68v-h$D;0nl_Ut41EGGQE4bh!_-e<^$eFhMTluYCnT|@WX(~sf|HhG zc<@tL3mtg*Q%EW6979;ggM@WN2Qx360W ztJdsIj-@Yd;m-5bo;g5*Bu1^P_<;Z0bQerb&5 zSJcs_UlF*YZD;HXzH<{UJa%;qF9Q$qG9b3fn0WDPDx-&tTG}JCL45La+U3n#^cQ0J z0%l|L=j89Vu=8`sOu01%cKAWq;Rx(-1Z3_PP|feE;gjA(eB}$+#qX)%bw{V-$}gcQ z_1+jPus-1HU^PtyD-L`K1zfgb+AtOMF}P_M=1!PMvs|{YIBR&|{b~5}F#MW4sNyy( zmbzp(f&B*{5#PK`7o->aZ$moV!>?~cHk9CsJMf6)QQ$c&IZl=Ao;%=>?kVtV%-u?r z#_#UHY)Hb7?@)@#!1%9dk_^-b&|x0P>K1nY@^k|0$CvK_A}ExULf#v)$xNE$(oX=8d3pa9jE!Fo%SuXDbx-5x#d=tI=d3>`a zUP;9N$m1`Q^^N)bOGrVFldrL4D2wqeC*KH8W72B=vV|BpSwQ0(lpywgMZyVd`5ud= z=*Ej{d6rDa{$Ua6Y%nGj^X|!{v$u*nEV8;wNZXB=YW{Z%gfOO#Ux66o&vpEl0CC0( zPw^$fp#Ckq!d4-}X+Fgs$!rDHiNBkq@jLZ=zR}mf z^T66F!!^zz!>|Q-HYkUv93b+`g8WJPef(vRpNd_k&SBCrsYG(g+1L=`@in>Xk(|6b zy=qHYyQ!#d<$3{bo6gE@n@Ek6<+RN#$%2@CWw*y=?*6ri7V1BzA>(;%tL6zky5>8Er`3xN$8&$jS+U~Nb^0^`M2USWZW#^0VJf3U<^+5ZC`n^qJ6 delta 4350 zcmZu!eNa`^7T;^%^Kl`~YXX8sT~tt16bwy5imRfap(0|BMzSj$x%u2jzgZGaEfK#KQg z*DyR>1T%yL5x&3;We|viiXjjZ&{hFa_~cA@iYJNQ7FQ6yTMU^z`C(ter%E6XQn0iH zDj^LwY=)s~=o|2+2WwgGkX>YE zu#$wUTm|;rJp6tmOb@h+Fmrx(c4m&<;Y@WbciKILZiO*BhHrv!P%w29jG$)qCa_L) zh!C%2W);|9c;OM0Gp2})nc^8^X6DekHoco?Z2g~25C{3Vt_()tv@)<6{ZF6}LpH;3 z8a*@sO_6emP>km`!$@j0RZ-jmMX(mvZGllnGnBKncw!5L`fi}jc!tQzMDNmy0EDe@ z2Fmf=R#?X?Mf~_O1mNrC;D+sZv7Ah}Q`>3=R2kN)?zUDbvhjCcsIZKiDo9r*Wzm|0#{d zF0e+7-vMDyoYg5BMGHHR`8!}{bdw0`|rbRyi@eX|1v@i*a^#kcZt}s3x;6j2e5?S7xC8*U@QRA0c->x{19QB zHhUT&Y!XO9;QUn{hs&u+FG^sx%yT&vlHZc*%2Na;N{j*18^LTgGar0*7i0wtq-nuy z@#k!?)*BEx#=`t1@2S+t(Ls`7*I?<8fe_L2Lo{Sab!l)*o_n?I%(|74wicOZ+==xG#ZU~S#lgkD&ijtwNy z6ErXllXO|sl8<1X8Io~gE#2y$K7|$J({uMiQg})qBc(lVq%As_)RU0=XOo7=_W-QfKj#Jug?7pc9_6{Er(#3cj=nJ=<_)^ggl7?iFqHH z~ z&i_@ZtiHLQq8fSf&4b|gY`#R-wU|WJE524@ROn!nIeppCpR}x-wA>&q(@F8AgK(I< zTH|}BOyjH6__T^|mlzeDkG|~G@FOwd5Ln5_vkyTuuhNfcNOd=H`=et*>mkx!I1FJz zjRa-5=Q@nT^uzG7(5%_UdJLYy(=>!uEUc$Pv)7ZFdcTO*JQ94Mg^W?#P2Z%;jXxcN z;i_^3-U6t@Ye(tkoxbsFZ&{sNwY9JTlhSnV@3 zakk=A-1giB_S{^>li+r{+yd*8@Z{I@+yr6E*K{yl`a1QwZy;CT_a&@34QU+Yp8Kzh z8x(EmB+C8774hXW5XgP{!be6LaB{4^(S4w-8H^=Ul*50Vp&;sydrs5qE}ez@J|vKE z5oW9pmccqZ%9fq&ptuQ7FQc4g>GG&Qot+e-VJG!qt;_9sc?9l1ou%ad35T_Nx*vLq z*^wD4L$sDq^oT(bA(%urMUtJ(EJW+Ru6IMJ%jr|NOb>D(`dxzCJVM5Ax6-x0Rt^I& z@-pO7ysf?r2Zv22urMk{j+1S2f}A9$$Z2xAoFTIq%)dqp*8d2R5NGty{YVe2&FEXM zz-Y>wbj-pllvQ;82*YvP6$tZ9mSI4G+s;_B`qdSvw@`#7SOPh;j&8aJR#UocMz}>Q z2DL#HWT34L62t9s?{`X(AA6@15|%=+f=Ak58${e=(ks3}W>hqvDwJ87jJ6&0 zr1{){AYLweFK|DcdIS7tRm!6hiac(Iv%rxF+hxk}0&^65mg26IAuv(zF&LxtKniw} zOxkHaTtqTeYJC3qw;S*=j<`X2(~@?WHQX-xX&^h$Q3-4+iM5l&;j~8;o@|FXP>tr_ zAbfF6AL~^=ZoO*5dbNaGs1y|Ddd%d@yMzR&BdmG;N`dVnazu})jbhvfD zK^R5L3%|jL;q@{kra4zSU2B|++;(SyJyXjpv~xM25?BDJ*A|V$$v0shG~lM25DLff z^P4a>>UckMM*tnuR8s3CZKqfKl+37*xg+R9BZ?ica7cp;kx9-R#jQ9!GwmL_T4xQp z-sylTk>~p;)Fk)Ts7d4Pt5TD()b9^C9DBKs&8|Lfv#W*~w1zj!j0#CMwLxSp_}(qj zal|c3XII;2;gkkz^$l@Kc_pmbv|8*Nmd8h21R~>+> zI-z)MXCI=j$BDWO;ksn+AlF!oy93Yi`!c?EhrAT9;tq`V70o2KXktEQZ_Bqs@rOHb zg7WZfcVV(nE5cs75vPkNy;r(0LCg(jUji_h6dPDZvS}t)_&j z^B(2@{y3})#zQ#H?xGbDSfn*kYF!un^EqN)6~Wh07V~=qe-*0KITQF+4z=pVNFG2> zv3fmi3iRB!IKXpXVRId9~_!ZOv6R zCx*-1)F!s0JC-LwJD!N;vCyI3i{&FF68-7vd^zRCYT0z&2rv-m&g6fgX75bC%rsD< ztSf=)=-bRR0zH2!M}zW+k7Z z3G^VBXIRoiXyATUo*N}WZ8iLf=eEv2_L9)OC<1jv-k5liu`3x|Z ziO|X!gsam%JPxFCdVtCJWrh4TsKoyk@}ujcGJLcsUDZxzFaZp7b=;#c1KXG;Dq6!TI_(o#zJF1iZWOZYZ{GR~6qe2S@3uEL}1`B9--7T&=_ zr91#jOZhmcQTLbf17O-N*W!}5xR(rZ^DQ20YLIv0gpGWKZ@FyAqIl6DH{vH7dAr|P z+3YFWE{|E3>&jfYG{;_$6MbI&bQ6!~nhiJe68hC#x`kT-E~}+m`A9Htmj~o3PD=~! 
zE9XJJ-J@2SVMhgj25*$}bo#ZMRY5CT)l(IG43MxLc;jvUJNcs8{0_efCNPJp7vAL= PCJ@yLJLoEye9Y|s0T$y; diff --git a/storagext/lib/src/types/storage_provider.rs b/storagext/lib/src/types/storage_provider.rs index 29f3ee61f..6a1962984 100644 --- a/storagext/lib/src/types/storage_provider.rs +++ b/storagext/lib/src/types/storage_provider.rs @@ -274,7 +274,7 @@ impl Into for PoStProof { pub struct SubmitWindowedPoStParams { pub deadline: u64, pub partitions: Vec, - pub proof: PoStProof, + pub proofs: Vec, } impl Into for SubmitWindowedPoStParams { @@ -282,7 +282,12 @@ impl Into for SubmitWindowedPoStParams { RuntimeSubmitWindowedPoStParams { deadline: self.deadline, partitions: bounded_vec::BoundedVec(self.partitions), - proof: self.proof.into(), + proofs: bounded_vec::BoundedVec( + self.proofs + .into_iter() + .map(|p| p.into()) + .collect::>(), + ), } } } @@ -493,10 +498,10 @@ mod tests { r#"{ "deadline": 10, "partitions": [10], - "proof": { + "proofs": [{ "post_proof": "2KiB", "proof_bytes": "1234567890" - } + }] }"#, ) .unwrap(); @@ -505,10 +510,10 @@ mod tests { SubmitWindowedPoStParams { deadline: 10, partitions: vec![10], - proof: PoStProof { + proofs: vec![PoStProof { post_proof: RegisteredPoStProof::StackedDRGWindow2KiBV1P1, proof_bytes: vec![0x12u8, 0x34, 0x56, 0x78, 0x90] - } + }], } ); }