diff --git a/packages/eth/src/eip_4844.rs b/packages/eth/src/eip_4844.rs new file mode 100644 index 00000000..e9d6fb4a --- /dev/null +++ b/packages/eth/src/eip_4844.rs @@ -0,0 +1,5 @@ +mod types; +mod utils; + +pub use types::*; +pub use utils::*; diff --git a/packages/eth/src/eip_4844/types.rs b/packages/eth/src/eip_4844/types.rs new file mode 100644 index 00000000..6bc6d4cb --- /dev/null +++ b/packages/eth/src/eip_4844/types.rs @@ -0,0 +1,244 @@ +use std::{ffi::CString, ops::Deref}; + +use ethers::{ + core::k256::sha2::{Digest, Sha256}, + types::{Address, Signature, H256, U256}, + utils::keccak256, +}; +use lazy_static::lazy_static; +use rlp::RlpStream; + +lazy_static! { + static ref KZG_SETTINGS: c_kzg::KzgSettings = c_kzg::KzgSettings::load_trusted_setup_file( + &CString::new("trusted_setup.txt").expect("C string"), + ) + .unwrap(); +} + +const BLOB_TX_TYPE: u8 = 0x03; + const VERSIONED_HASH_VERSION_KZG: u8 = 1; +const MAX_BLOBS_PER_BLOCK: usize = 6; +pub const MAX_BYTES_PER_BLOB: usize = c_kzg::BYTES_PER_BLOB; + +pub trait BlobSigner { + fn sign_hash(&self, hash: H256) -> std::result::Result<Signature, String>; +} + +impl BlobSigner for ethers::signers::LocalWallet { + fn sign_hash(&self, hash: H256) -> std::result::Result<Signature, String> { + self.sign_hash(hash).map_err(|e| e.to_string()) + } +} + +pub struct PreparedBlob { + pub commitment: Vec<u8>, + pub proof: Vec<u8>, + pub versioned_hash: H256, + pub data: Vec<u8>, +} + +pub struct BlobSidecar { + blobs: Vec<PreparedBlob>, +} + +impl BlobSidecar { + pub fn new(data: Vec<u8>) -> std::result::Result<Self, String> { + let num_blobs = data.len().div_ceil(MAX_BYTES_PER_BLOB); + + if num_blobs > MAX_BLOBS_PER_BLOCK { + return Err(format!( + "Data cannot fit into the maximum number of blobs per block: {}", + MAX_BLOBS_PER_BLOCK + )); + } + + let blobs = Self::partition_data(data); + let prepared_blobs = blobs.iter().map(|blob| Self::prepare_blob(blob)).collect(); + + Ok(Self { + blobs: prepared_blobs, + }) + } + + pub fn num_blobs(&self) -> usize { + self.blobs.len() + } + + pub fn 
versioned_hashes(&self) -> Vec<H256> { + self.blobs.iter().map(|blob| blob.versioned_hash).collect() + } + + fn partition_data(data: Vec<u8>) -> Vec<c_kzg::Blob> { + data.chunks(c_kzg::BYTES_PER_BLOB) + .map(|chunk| { + let mut blob = [0u8; c_kzg::BYTES_PER_BLOB]; + blob[..c_kzg::BYTES_PER_BLOB].copy_from_slice(chunk); + blob.into() + }) + .collect() + } + + fn prepare_blob(blob: &c_kzg::Blob) -> PreparedBlob { + let commitment = Self::kzg_commitment(blob); + let versioned_hash = Self::commitment_to_versioned_hash(&commitment); + let proof = Self::kzg_proof(blob, &commitment); + + PreparedBlob { + commitment: commitment.to_vec(), + proof: proof.to_vec(), + versioned_hash, + data: blob.to_vec(), + } + } + + fn kzg_commitment(blob: &c_kzg::Blob) -> c_kzg::KzgCommitment { + c_kzg::KzgCommitment::blob_to_kzg_commitment(blob, &KZG_SETTINGS).unwrap() + } + + fn commitment_to_versioned_hash(commitment: &c_kzg::KzgCommitment) -> H256 { + let mut res: [u8; 32] = Sha256::digest(commitment.deref()).into(); + res[0] = VERSIONED_HASH_VERSION_KZG; + H256::from(res) + } + + fn kzg_proof(blob: &c_kzg::Blob, commitment: &c_kzg::KzgCommitment) -> c_kzg::KzgProof { + c_kzg::KzgProof::compute_blob_kzg_proof(blob, &commitment.to_bytes(), &KZG_SETTINGS) + .unwrap() + } +} + +pub struct BlobTransaction { + pub to: Address, + pub chain_id: U256, + pub gas_limit: U256, + pub nonce: U256, + pub max_fee_per_gas: U256, + pub max_priority_fee_per_gas: U256, + pub max_fee_per_blob_gas: U256, + pub blob_versioned_hashes: Vec<H256>, +} + +pub struct BlobTransactionEncoder { + tx: BlobTransaction, + sidecar: BlobSidecar, +} + +impl BlobTransactionEncoder { + pub fn new(tx: BlobTransaction, sidecar: BlobSidecar) -> Self { + Self { tx, sidecar } + } + + pub fn raw_signed_w_sidecar(self, signer: &impl BlobSigner) -> (H256, Vec<u8>) { + let signed_tx_bytes = self.raw_signed(signer); + let tx_hash = H256(keccak256(&signed_tx_bytes)); + let final_bytes = self.encode_sidecar(signed_tx_bytes); + + (tx_hash, final_bytes) + } + + fn 
encode_sidecar(self, payload: Vec<u8>) -> Vec<u8> { + let blobs_count = self.sidecar.num_blobs(); + + let mut stream = RlpStream::new(); + stream.begin_list(4); + + // skip the tx type byte + stream.append_raw(&payload[1..], 1); + + let mut blob_stream = RlpStream::new_list(blobs_count); + let mut commitment_stream = RlpStream::new_list(blobs_count); + let mut proof_stream = RlpStream::new_list(blobs_count); + + for blob in self.sidecar.blobs { + blob_stream.append(&blob.data); + commitment_stream.append(&blob.commitment); + proof_stream.append(&blob.proof); + } + + stream.append_raw(&blob_stream.out(), 1); + stream.append_raw(&commitment_stream.out(), 1); + stream.append_raw(&proof_stream.out(), 1); + + let tx = [&[BLOB_TX_TYPE], stream.as_raw()].concat(); + + tx + } + + fn raw_signed(&self, signer: &impl BlobSigner) -> Vec<u8> { + let tx_bytes = self.encode(None); + let signature = self.compute_signature(tx_bytes, signer); + let signed_tx_bytes = self.encode(Some(signature)); + + signed_tx_bytes + } + + fn compute_signature(&self, tx_bytes: Vec<u8>, signer: &impl BlobSigner) -> Signature { + let message_hash = H256::from(keccak256(&tx_bytes)); + let signature = signer + .sign_hash(message_hash) + .expect("signing should not fail"); + + signature + } + + fn encode(&self, signature: Option<Signature>) -> Vec<u8> { + let tx_bytes = if let Some(signature) = signature { + self.rlp_signed(signature) + } else { + self.rlp() + }; + + [&[BLOB_TX_TYPE], tx_bytes.as_slice()].concat() + } + + fn rlp(&self) -> Vec<u8> { + let mut stream = RlpStream::new(); + stream.begin_list(11); + + self.append_common_tx_fields(&mut stream); + Self::append_unused_fields(&mut stream); + self.append_blob_tx_fields(&mut stream); + + stream.as_raw().to_vec() + } + + fn rlp_signed(&self, signature: Signature) -> Vec<u8> { + let mut stream = RlpStream::new(); + stream.begin_list(14); + + self.append_common_tx_fields(&mut stream); + Self::append_unused_fields(&mut stream); + self.append_blob_tx_fields(&mut stream); + + 
self.append_signature(&mut stream, signature); + + stream.as_raw().to_vec() + } + + fn append_common_tx_fields(&self, stream: &mut RlpStream) { + stream.append(&self.tx.chain_id); + stream.append(&self.tx.nonce); + stream.append(&self.tx.max_priority_fee_per_gas); + stream.append(&self.tx.max_fee_per_gas); + stream.append(&self.tx.gas_limit); + stream.append(&self.tx.to); + } + + fn append_unused_fields(stream: &mut RlpStream) { + // value, data and access_list + stream.append_empty_data(); + stream.append_empty_data(); + stream.begin_list(0); + } + + fn append_blob_tx_fields(&self, stream: &mut RlpStream) { + stream.append(&self.tx.max_fee_per_blob_gas); + stream.append_list(&self.tx.blob_versioned_hashes); + } + + fn append_signature(&self, stream: &mut RlpStream, signature: Signature) { + stream.append(&signature.v); + stream.append(&signature.r); + stream.append(&signature.s); + } +} diff --git a/packages/eth/src/eip_4844/utils.rs b/packages/eth/src/eip_4844/utils.rs new file mode 100644 index 00000000..44538736 --- /dev/null +++ b/packages/eth/src/eip_4844/utils.rs @@ -0,0 +1,35 @@ +use ports::types::U256; + +const BLOB_BASE_FEE_UPDATE_FRACTION: u64 = 3338477; +const GAS_PER_BLOB: u64 = 131_072; +const MIN_BASE_FEE_PER_BLOB_GAS: u64 = 1; + +pub fn calculate_blob_fee(excess_blob_gas: U256, num_blobs: u64) -> U256 { + get_total_blob_gas(num_blobs) * get_base_fee_per_blob_gas(excess_blob_gas) +} + +fn get_total_blob_gas(num_blobs: u64) -> U256 { + (GAS_PER_BLOB * num_blobs).into() +} + +fn get_base_fee_per_blob_gas(excess_blob_gas: U256) -> U256 { + fake_exponential( + MIN_BASE_FEE_PER_BLOB_GAS.into(), + excess_blob_gas, + BLOB_BASE_FEE_UPDATE_FRACTION.into(), + ) +} + +fn fake_exponential(factor: U256, numerator: U256, denominator: U256) -> U256 { + assert!(!denominator.is_zero(), "attempt to divide by zero"); + + let mut i = 1; + let mut output = U256::zero(); + let mut numerator_accum = factor * denominator; + while !numerator_accum.is_zero() { + output = 
output + numerator_accum; + numerator_accum = (numerator_accum * numerator) / (denominator * i); + i += 1; + } + output / denominator +} diff --git a/packages/services/src/state_committer.rs b/packages/services/src/state_committer.rs new file mode 100644 index 00000000..f339a00f --- /dev/null +++ b/packages/services/src/state_committer.rs @@ -0,0 +1,161 @@ +use async_trait::async_trait; +use ports::storage::Storage; + +use crate::{Result, Runner}; + +pub struct StateCommitter<L1, Db> { + l1_adapter: L1, + storage: Db, +} + +impl<L1, Db> StateCommitter<L1, Db> { + pub fn new(l1: L1, storage: Db) -> Self { + Self { + l1_adapter: l1, + storage, + } + } +} + +impl<L1, Db> StateCommitter<L1, Db> +where + L1: ports::l1::Api, + Db: Storage, +{ + async fn submit_state(&self) -> Result<()> { + let fragments = self.storage.get_unsubmitted_fragments().await?; + + let data = fragments + .into_iter() + .flat_map(|fragment| fragment.raw_data) + .collect::<Vec<_>>(); + + let tx_hash = self.l1_adapter.submit_l2_state(data).await?; + + dbg!(tx_hash); + self.storage.insert_pending_tx(tx_hash).await?; + + Ok(()) + } + + async fn is_tx_pending(&self) -> Result<bool> { + let pending_txs = self.storage.get_pending_txs().await?; + Ok(pending_txs.is_empty()) + } +} + +#[async_trait] +impl<L1, Db> Runner for StateCommitter<L1, Db> +where + L1: ports::l1::Api + Send + Sync, + Db: Storage, +{ + async fn run(&mut self) -> Result<()> { + if self.is_tx_pending().await? 
{ + return Ok(()); + }; + + self.submit_state().await?; + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use ports::{ + fuel::{FuelBlock, FuelBytes32, MockApi as FuelMockApi}, + types::{L1Height, U256}, + }; + use storage::PostgresProcess; + use tai64::Tai64; + + use super::*; + + struct MockL1 { + api: ports::l1::MockApi, + } + impl MockL1 { + fn new() -> Self { + Self { + api: ports::l1::MockApi::new(), + } + } + } + + #[async_trait::async_trait] + impl ports::l1::Api for MockL1 { + async fn submit_l2_state(&self, _state_data: Vec<u8>) -> ports::l1::Result<[u8; 32]> { + Ok([0; 32]) + } + + async fn get_block_number(&self) -> ports::l1::Result<L1Height> { + Ok(0.into()) + } + + async fn balance(&self) -> ports::l1::Result<U256> { + Ok(U256::zero()) + } + } + + fn given_l1_that_expects_submission(block: ValidatedFuelBlock) -> MockL1 { + let mut l1 = MockL1::new(); + + l1.expect_submit_l2_state() + .with(predicate::eq(block)) + .return_once(move |_| Ok([0; 32])); + + l1.contract + .expect_submit() + .with(predicate::eq(block)) + .return_once(move |_| Ok(())); + + l1.api + .expect_get_block_number() + .return_once(move || Ok(0u32.into())); + + l1 + } + + fn given_block() -> FuelBlock { + let id = FuelBytes32::from([1u8; 32]); + let header = ports::fuel::FuelHeader { + id, + da_height: 0, + consensus_parameters_version: Default::default(), + state_transition_bytecode_version: Default::default(), + transactions_count: 1, + message_receipt_count: 0, + transactions_root: Default::default(), + message_outbox_root: Default::default(), + event_inbox_root: Default::default(), + height: 1, + prev_root: Default::default(), + time: Tai64::now(), + application_hash: Default::default(), + }; + let block = FuelBlock { + id, + header, + transactions: vec![], + consensus: ports::fuel::FuelConsensus::Unknown, + block_producer: Default::default(), + }; + + block + } + + #[tokio::test] + async fn test_submit_state() -> Result<()> { + let fuel_mock = FuelMockApi::new(); + let block = given_block(); + + let 
process = PostgresProcess::shared().await.unwrap(); + let db = process.create_random_db().await?; + let committer = StateCommitter::new(db, fuel_mock); + + committer.submit_state(block).await.unwrap(); + + Ok(()) + } +} diff --git a/packages/services/src/state_importer.rs b/packages/services/src/state_importer.rs new file mode 100644 index 00000000..9efae06b --- /dev/null +++ b/packages/services/src/state_importer.rs @@ -0,0 +1,134 @@ +use async_trait::async_trait; +use ports::{ + fuel::FuelBlock, + storage::Storage, + types::{StateFragment, StateSubmission}, +}; + +use crate::{Result, Runner}; + +pub struct StateImporter<Db, A> { + storage: Db, + fuel_adapter: A, +} + +impl<Db, A> StateImporter<Db, A> { + pub fn new(storage: Db, fuel_adapter: A) -> Self { + Self { + storage, + fuel_adapter, + } + } +} + +impl<Db, A> StateImporter<Db, A> +where + Db: Storage, + A: ports::fuel::Api, +{ + async fn fetch_latest_block(&self) -> Result<FuelBlock> { + let latest_block = self.fuel_adapter.latest_block().await?; + + // validate if needed + + Ok(latest_block) + } + + fn block_to_state_submission( + &self, + block: FuelBlock, + ) -> Result<(StateSubmission, Vec<StateFragment>)> { + // Serialize the block into bytes + let block_data = block + .transactions + .iter() + .flat_map(|tx| tx.to_vec()) + .collect::<Vec<_>>(); + + let fragments = block_data + .chunks(StateFragment::MAX_FRAGMENT_SIZE) + .enumerate() + .map(|(index, chunk)| StateFragment { + raw_data: chunk.to_vec(), + fragment_index: index as u32, + completed: false, + block_hash: *block.id, + }) + .collect::<Vec<_>>(); + + let submission = StateSubmission { + block_hash: *block.header.id, + block_height: block.header.height, + completed: false, + }; + + Ok((submission, fragments)) + } + + async fn submit_state(&self, block: FuelBlock) -> Result<()> { + let (submission, fragments) = self.block_to_state_submission(block)?; + self.storage.insert_state(submission, fragments).await?; + + Ok(()) + } +} + +#[async_trait] +impl<Db, Fuel> Runner for StateImporter<Db, Fuel> +where + Db: Storage, + Fuel: 
ports::fuel::Api, +{ + async fn run(&mut self) -> Result<()> { + let block = self.fetch_latest_block().await?; + self.submit_state(block).await?; + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use ports::fuel::{FuelBytes32, MockApi as FuelMockApi}; + use storage::PostgresProcess; + use tai64::Tai64; + + use super::*; + + #[tokio::test] + async fn test_submit_state() -> Result<()> { + let fuel_mock = FuelMockApi::new(); + + let id = FuelBytes32::from([1u8; 32]); + let header = ports::fuel::FuelHeader { + id, + da_height: 0, + consensus_parameters_version: Default::default(), + state_transition_bytecode_version: Default::default(), + transactions_count: 1, + message_receipt_count: 0, + transactions_root: Default::default(), + message_outbox_root: Default::default(), + event_inbox_root: Default::default(), + height: 1, + prev_root: Default::default(), + time: Tai64::now(), + application_hash: Default::default(), + }; + let block = FuelBlock { + id, + header, + transactions: vec![], + consensus: ports::fuel::FuelConsensus::Unknown, + block_producer: Default::default(), + }; + + let process = PostgresProcess::shared().await.unwrap(); + let db = process.create_random_db().await?; + let committer = StateImporter::new(db, fuel_mock); + + committer.submit_state(block).await.unwrap(); + + Ok(()) + } +} diff --git a/packages/storage/.sqlx/query-1ef96af7a8e525a1e045fedd898a92bbdf388882f59ee9e309de8a2617ee8979.json b/packages/storage/.sqlx/query-1ef96af7a8e525a1e045fedd898a92bbdf388882f59ee9e309de8a2617ee8979.json new file mode 100644 index 00000000..b07f976b --- /dev/null +++ b/packages/storage/.sqlx/query-1ef96af7a8e525a1e045fedd898a92bbdf388882f59ee9e309de8a2617ee8979.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT * FROM l1_pending_transaction", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "transaction_hash", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": 
"1ef96af7a8e525a1e045fedd898a92bbdf388882f59ee9e309de8a2617ee8979" +} diff --git a/packages/storage/.sqlx/query-312026b68fc8b8cd1ffefda229ee18db5e0f1f5707cf3ca653d155043387a669.json b/packages/storage/.sqlx/query-312026b68fc8b8cd1ffefda229ee18db5e0f1f5707cf3ca653d155043387a669.json new file mode 100644 index 00000000..6aa7bdbc --- /dev/null +++ b/packages/storage/.sqlx/query-312026b68fc8b8cd1ffefda229ee18db5e0f1f5707cf3ca653d155043387a669.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO l1_pending_transaction (transaction_hash) VALUES ($1)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Bytea" + ] + }, + "nullable": [] + }, + "hash": "312026b68fc8b8cd1ffefda229ee18db5e0f1f5707cf3ca653d155043387a669" +} diff --git a/packages/storage/.sqlx/query-ca854d38f6ebd416cae5699f46c11900b3b709fbad2e932cf8b188e48463a413.json b/packages/storage/.sqlx/query-ca854d38f6ebd416cae5699f46c11900b3b709fbad2e932cf8b188e48463a413.json new file mode 100644 index 00000000..8e690a87 --- /dev/null +++ b/packages/storage/.sqlx/query-ca854d38f6ebd416cae5699f46c11900b3b709fbad2e932cf8b188e48463a413.json @@ -0,0 +1,38 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT * FROM l1_state_fragment WHERE completed = false LIMIT 6", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "fuel_block_hash", + "type_info": "Bytea" + }, + { + "ordinal": 1, + "name": "fragment_index", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "raw_data", + "type_info": "Bytea" + }, + { + "ordinal": 3, + "name": "completed", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + false, + false + ] + }, + "hash": "ca854d38f6ebd416cae5699f46c11900b3b709fbad2e932cf8b188e48463a413" +}