From 01ce3385fe3fb82198ff96b147b849cc729f5f5b Mon Sep 17 00:00:00 2001 From: Nasr Date: Fri, 20 Sep 2024 14:48:18 -0400 Subject: [PATCH 01/51] opt(torii-core): move off queryqueue for executing tx --- .../core/src/{query_queue.rs => executor.rs} | 42 ++++++++++--------- crates/torii/core/src/lib.rs | 2 +- crates/torii/core/src/sql.rs | 2 +- 3 files changed, 24 insertions(+), 22 deletions(-) rename crates/torii/core/src/{query_queue.rs => executor.rs} (85%) diff --git a/crates/torii/core/src/query_queue.rs b/crates/torii/core/src/executor.rs similarity index 85% rename from crates/torii/core/src/query_queue.rs rename to crates/torii/core/src/executor.rs index 589035ca4e..f854b69dd3 100644 --- a/crates/torii/core/src/query_queue.rs +++ b/crates/torii/core/src/executor.rs @@ -1,5 +1,6 @@ use std::collections::VecDeque; - +use std::sync::Arc; +use tokio::sync::mpsc::{channel, Receiver, Sender}; use anyhow::{Context, Result}; use dojo_types::schema::Ty; use sqlx::{FromRow, Pool, Sqlite}; @@ -52,28 +53,29 @@ pub enum QueryType { Other, } -impl QueryQueue { - pub fn new(pool: Pool) -> Self { - QueryQueue { pool, queue: VecDeque::new(), publish_queue: VecDeque::new() } - } +pub struct TxExecutor { + pool: Pool, + rx: Receiver, +} - pub fn enqueue>( - &mut self, - statement: S, - arguments: Vec, - query_type: QueryType, - ) { - self.queue.push_back((statement.into(), arguments, query_type)); - } +pub struct QueryMessage { + statement: String, + arguments: Vec, + query_type: QueryType, +} - pub fn push_publish(&mut self, value: BrokerMessage) { - self.publish_queue.push_back(value); +impl TxExecutor { + pub fn new(pool: Pool) -> (Self, Sender) { + let (tx, rx) = channel(100); // Adjust buffer size as needed + (TxExecutor { pool, rx }, tx) } - pub async fn execute_all(&mut self) -> Result<()> { + pub async fn run(&mut self) -> Result<()> { let mut tx = self.pool.begin().await?; + let mut publish_queue = Vec::new(); - while let Some((statement, arguments, query_type)) = self.queue.pop_front() { + while let Some(msg) = self.rx.recv().await { + let QueryMessage { statement, arguments, query_type } = msg; let mut query = sqlx::query(&statement); for arg in &arguments { @@ -95,7 +97,7 @@ impl QueryQueue { entity_updated.updated_model = Some(entity); entity_updated.deleted = false; let broker_message = BrokerMessage::EntityUpdated(entity_updated); - self.push_publish(broker_message); + publish_queue.push(broker_message); } QueryType::DeleteEntity(entity) => { let delete_model = query.execute(&mut *tx).await.with_context(|| { @@ -134,7 +136,7 @@ impl QueryQueue { } let broker_message = BrokerMessage::EntityUpdated(entity_updated); - self.push_publish(broker_message); + publish_queue.push(broker_message); } QueryType::Other => { query.execute(&mut *tx).await.with_context(|| { @@ -146,7 +148,7 @@ impl QueryQueue { tx.commit().await?; - while let Some(message) = self.publish_queue.pop_front() { + for message in publish_queue { send_broker_message(message); } diff --git a/crates/torii/core/src/lib.rs b/crates/torii/core/src/lib.rs index df6e8b3adc..d47e9bf71f 100644 --- a/crates/torii/core/src/lib.rs +++ b/crates/torii/core/src/lib.rs @@ -3,7 +3,7 @@ pub mod engine; pub mod error; pub mod model; pub mod processors; -pub mod query_queue; +pub mod executor; pub mod simple_broker; pub mod sql; pub mod types; diff --git a/crates/torii/core/src/sql.rs b/crates/torii/core/src/sql.rs index ccca4f4c7d..2e2574d1cd 100644 --- a/crates/torii/core/src/sql.rs +++ b/crates/torii/core/src/sql.rs @@ -16,7 +16,7 @@ use 
starknet_crypto::poseidon_hash_many; use tracing::{debug, warn}; use crate::cache::{Model, ModelCache}; -use crate::query_queue::{Argument, BrokerMessage, DeleteEntityQuery, QueryQueue, QueryType}; +use crate::executor::{Argument, BrokerMessage, DeleteEntityQuery, QueryQueue, QueryType}; use crate::types::{ Event as EventEmitted, EventMessage as EventMessageUpdated, Model as ModelRegistered, }; From e0ec76772b7ba3062ced15bfd7ffaab31133c2e3 Mon Sep 17 00:00:00 2001 From: Nasr Date: Tue, 24 Sep 2024 12:16:55 -0400 Subject: [PATCH 02/51] feat: replace queury queue by executor --- crates/torii/core/src/engine.rs | 43 +-- crates/torii/core/src/executor.rs | 51 +-- crates/torii/core/src/sql.rs | 507 ++++++++++++++++-------------- crates/torii/core/src/sql_test.rs | 40 ++- 4 files changed, 351 insertions(+), 290 deletions(-) diff --git a/crates/torii/core/src/engine.rs b/crates/torii/core/src/engine.rs index f24180ad44..127dbcd165 100644 --- a/crates/torii/core/src/engine.rs +++ b/crates/torii/core/src/engine.rs @@ -21,6 +21,7 @@ use tokio::task::JoinSet; use tokio::time::sleep; use tracing::{debug, error, info, trace, warn}; +use crate::executor::{Executor, QueryMessage, QueryType}; use crate::processors::event_message::EventMessageProcessor; use crate::processors::{BlockProcessor, EventProcessor, TransactionProcessor}; use crate::sql::Sql; @@ -177,17 +178,29 @@ impl Engine

{ info!(target: LOG_TARGET, "Syncing reestablished."); } + let (mut executor, sender) = Executor::new(self.db.pool.clone()); + tokio::spawn(async move { + executor.run().await; + }); + self.db.executor = sender; + match self.process(fetch_result).await { - Ok(()) => {} - Err(e) => { - error!(target: LOG_TARGET, error = %e, "Processing fetched data."); - erroring_out = true; - sleep(backoff_delay).await; - if backoff_delay < max_backoff_delay { - backoff_delay *= 2; - } + Ok(()) => { + self.db.executor.send(QueryMessage { + statement: "COMMIT".to_string(), + arguments: vec![], + query_type: QueryType::Commit, + }); + } + Err(e) => { + error!(target: LOG_TARGET, error = %e, "Processing fetched data."); + erroring_out = true; + sleep(backoff_delay).await; + if backoff_delay < max_backoff_delay { + backoff_delay *= 2; } } + } } Err(e) => { erroring_out = true; @@ -411,7 +424,6 @@ impl Engine

{ if let Some(tx) = last_pending_block_world_tx { self.db.set_last_pending_block_world_tx(Some(tx)); } - self.db.execute().await?; return Ok(()); } _ => { @@ -447,8 +459,6 @@ impl Engine

{ self.db.set_last_pending_block_world_tx(Some(tx)); } - self.db.execute().await?; - Ok(()) } @@ -482,10 +492,6 @@ impl Engine

{ self.process_block(block_number, data.blocks[&block_number]).await?; last_block = block_number; } - - if self.db.query_queue.queue.len() >= QUERY_QUEUE_BATCH_SIZE { - self.db.execute().await?; - } } // Process parallelized events @@ -495,8 +501,6 @@ impl Engine

{ self.db.set_last_pending_block_world_tx(None); self.db.set_last_pending_block_tx(None); - self.db.execute().await?; - Ok(()) } @@ -532,10 +536,7 @@ impl Engine

{ } // Join all tasks - while let Some(result) = set.join_next().await { - let local_db = result??; - self.db.merge(local_db)?; - } + while let Some(_) = set.join_next().await {} Ok(()) } diff --git a/crates/torii/core/src/executor.rs b/crates/torii/core/src/executor.rs index f854b69dd3..e122f807e5 100644 --- a/crates/torii/core/src/executor.rs +++ b/crates/torii/core/src/executor.rs @@ -1,6 +1,6 @@ use std::collections::VecDeque; use std::sync::Arc; -use tokio::sync::mpsc::{channel, Receiver, Sender}; +use tokio::sync::mpsc::{unbounded_channel, Receiver, Sender, UnboundedReceiver, UnboundedSender}; use anyhow::{Context, Result}; use dojo_types::schema::Ty; use sqlx::{FromRow, Pool, Sqlite}; @@ -29,15 +29,6 @@ pub enum BrokerMessage { EventEmitted(EventEmitted), } -#[derive(Debug, Clone)] -pub struct QueryQueue { - pool: Pool, - pub queue: VecDeque<(String, Vec, QueryType)>, - // publishes that are related to queries in the queue, they should be sent - // after the queries are executed - pub publish_queue: VecDeque, -} - #[derive(Debug, Clone)] pub struct DeleteEntityQuery { pub entity_id: String, @@ -50,24 +41,27 @@ pub struct DeleteEntityQuery { pub enum QueryType { SetEntity(Ty), DeleteEntity(DeleteEntityQuery), + RegisterModel, + StoreEvent, + Commit, Other, } -pub struct TxExecutor { +pub struct Executor { pool: Pool, - rx: Receiver, + rx: UnboundedReceiver, } pub struct QueryMessage { - statement: String, - arguments: Vec, - query_type: QueryType, + pub statement: String, + pub arguments: Vec, + pub query_type: QueryType, } -impl TxExecutor { - pub fn new(pool: Pool) -> (Self, Sender) { - let (tx, rx) = channel(100); // Adjust buffer size as needed - (TxExecutor { pool, rx }, tx) +impl Executor { + pub fn new(pool: Pool) -> (Self, UnboundedSender) { + let (tx, rx) = unbounded_channel(); + (Executor { pool, rx }, tx) } pub async fn run(&mut self) -> Result<()> { @@ -138,6 +132,25 @@ impl TxExecutor { let broker_message = BrokerMessage::EntityUpdated(entity_updated); publish_queue.push(broker_message); } + QueryType::RegisterModel => { + let row = query.fetch_one(&mut *tx).await.with_context(|| { + format!("Failed to execute query: {:?}, args: {:?}", statement, arguments) + })?; + let model_registered = ModelRegistered::from_row(&row)?; + let broker_message = BrokerMessage::ModelRegistered(model_registered); + publish_queue.push(broker_message); + } + QueryType::StoreEvent => { + let row = query.fetch_one(&mut *tx).await.with_context(|| { + format!("Failed to execute query: {:?}, args: {:?}", statement, arguments) + })?; + let event = EventEmitted::from_row(&row)?; + let broker_message = BrokerMessage::EventEmitted(event); + publish_queue.push(broker_message); + } + QueryType::Commit => { + break; + } QueryType::Other => { query.execute(&mut *tx).await.with_context(|| { format!("Failed to execute query: {:?}, args: {:?}", statement, arguments) diff --git a/crates/torii/core/src/sql.rs b/crates/torii/core/src/sql.rs index 2e2574d1cd..979478d41a 100644 --- a/crates/torii/core/src/sql.rs +++ b/crates/torii/core/src/sql.rs @@ -3,7 +3,6 @@ use std::str::FromStr; use std::sync::Arc; use anyhow::{anyhow, Result}; -use chrono::Utc; use dojo_types::primitive::Primitive; use dojo_types::schema::{EnumOption, Member, Struct, Ty}; use dojo_world::contracts::abi::model::Layout; @@ -13,14 +12,12 @@ use sqlx::pool::PoolConnection; use sqlx::{Pool, Sqlite}; use starknet::core::types::{Event, Felt, InvokeTransaction, Transaction}; use starknet_crypto::poseidon_hash_many; -use tracing::{debug, warn}; 
+use tokio::sync::mpsc::UnboundedSender; use crate::cache::{Model, ModelCache}; -use crate::executor::{Argument, BrokerMessage, DeleteEntityQuery, QueryQueue, QueryType}; -use crate::types::{ - Event as EventEmitted, EventMessage as EventMessageUpdated, Model as ModelRegistered, -}; -use crate::utils::{must_utc_datetime_from_timestamp, utc_dt_string_from_timestamp}; +use crate::executor::{Argument, DeleteEntityQuery, QueryMessage, QueryType}; +use crate::types::EventMessage as EventMessageUpdated; +use crate::utils::utc_dt_string_from_timestamp; type IsEventMessage = bool; type IsStoreUpdate = bool; @@ -32,68 +29,51 @@ pub const FELT_DELIMITER: &str = "/"; #[path = "sql_test.rs"] mod test; -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct Sql { world_address: Felt, pub pool: Pool, - pub query_queue: QueryQueue, + pub executor: UnboundedSender, model_cache: Arc, } -impl Clone for Sql { - fn clone(&self) -> Self { - Self { - world_address: self.world_address, - pool: self.pool.clone(), - query_queue: QueryQueue::new(self.pool.clone()), - model_cache: self.model_cache.clone(), - } - } -} - impl Sql { - pub async fn new(pool: Pool, world_address: Felt) -> Result { - let mut query_queue = QueryQueue::new(pool.clone()); - - query_queue.enqueue( - "INSERT OR IGNORE INTO contracts (id, contract_address, contract_type) VALUES (?, ?, \ - ?)", - vec![ + pub async fn new( + pool: Pool, + world_address: Felt, + executor: UnboundedSender, + ) -> Result { + executor.send(QueryMessage { + statement: "INSERT OR IGNORE INTO contracts (id, contract_address, contract_type) VALUES (?, ?, \ + ?)".to_string(), + arguments: vec![ Argument::FieldElement(world_address), Argument::FieldElement(world_address), Argument::String(WORLD_CONTRACT_TYPE.to_string()), ], - QueryType::Other, - ); + query_type: QueryType::Other, + }); - query_queue.execute_all().await?; + executor.send(QueryMessage { + statement: "INSERT OR IGNORE INTO contracts (id, contract_address, contract_type) VALUES (?, ?, \ + ?)".to_string(), + arguments: vec![ + Argument::FieldElement(world_address), + Argument::FieldElement(world_address), + Argument::String(WORLD_CONTRACT_TYPE.to_string()), + ], + query_type: QueryType::Other, + }); Ok(Self { pool: pool.clone(), world_address, - query_queue, + executor, model_cache: Arc::new(ModelCache::new(pool)), }) } - pub fn merge(&mut self, other: Sql) -> Result<()> { - // Merge query queue - self.query_queue.queue.extend(other.query_queue.queue); - self.query_queue.publish_queue.extend(other.query_queue.publish_queue); - - // This should never happen - if self.world_address != other.world_address { - warn!( - "Merging Sql instances with different world addresses: {} and {}", - self.world_address, other.world_address - ); - } - - Ok(()) - } - pub async fn head(&self) -> Result<(u64, Option, Option)> { - let mut conn: PoolConnection = self.pool.acquire().await?; let indexer_query = sqlx::query_as::<_, (Option, Option, Option, String)>( "SELECT head, last_pending_block_world_tx, last_pending_block_tx, contract_type \ @@ -102,7 +82,7 @@ impl Sql { .bind(format!("{:#x}", self.world_address)); let indexer: (Option, Option, Option, String) = - indexer_query.fetch_one(&mut *conn).await?; + indexer_query.fetch_one(&self.pool).await?; Ok(( indexer.0.map(|h| h.try_into().expect("doesn't fit in u64")).unwrap_or(0), indexer.1.map(|f| Felt::from_str(&f)).transpose()?, @@ -113,11 +93,11 @@ impl Sql { pub fn set_head(&mut self, head: u64) { let head = Argument::Int(head.try_into().expect("doesn't fit in u64")); let id = 
Argument::FieldElement(self.world_address); - self.query_queue.enqueue( - "UPDATE contracts SET head = ? WHERE id = ?", - vec![head, id], - QueryType::Other, - ); + self.executor.send(QueryMessage { + statement: "UPDATE contracts SET head = ? WHERE id = ?".to_string(), + arguments: vec![head, id], + query_type: QueryType::Other, + }); } pub fn set_last_pending_block_world_tx(&mut self, last_pending_block_world_tx: Option) { @@ -129,11 +109,12 @@ impl Sql { let id = Argument::FieldElement(self.world_address); - self.query_queue.enqueue( - "UPDATE contracts SET last_pending_block_world_tx = ? WHERE id = ?", - vec![last_pending_block_world_tx, id], - QueryType::Other, - ); + self.executor.send(QueryMessage { + statement: "UPDATE contracts SET last_pending_block_world_tx = ? WHERE id = ?" + .to_string(), + arguments: vec![last_pending_block_world_tx, id], + query_type: QueryType::Other, + }); } pub fn set_last_pending_block_tx(&mut self, last_pending_block_tx: Option) { @@ -144,11 +125,11 @@ impl Sql { }; let id = Argument::FieldElement(self.world_address); - self.query_queue.enqueue( - "UPDATE contracts SET last_pending_block_tx = ? WHERE id = ?", - vec![last_pending_block_tx, id], - QueryType::Other, - ); + self.executor.send(QueryMessage { + statement: "UPDATE contracts SET last_pending_block_tx = ? WHERE id = ?".to_string(), + arguments: vec![last_pending_block_tx, id], + query_type: QueryType::Other, + }); } #[allow(clippy::too_many_arguments)] @@ -173,19 +154,22 @@ impl Sql { class_hash=EXCLUDED.class_hash, layout=EXCLUDED.layout, \ packed_size=EXCLUDED.packed_size, unpacked_size=EXCLUDED.unpacked_size, \ executed_at=EXCLUDED.executed_at RETURNING *"; - let model_registered: ModelRegistered = sqlx::query_as(insert_models) - // this is temporary until the model hash is precomputed - .bind(format!("{:#x}", selector)) - .bind(namespace) - .bind(model.name()) - .bind(format!("{class_hash:#x}")) - .bind(format!("{contract_address:#x}")) - .bind(serde_json::to_string(&layout)?) - .bind(packed_size) - .bind(unpacked_size) - .bind(utc_dt_string_from_timestamp(block_timestamp)) - .fetch_one(&self.pool) - .await?; + let arguments = vec![ + Argument::String(format!("{:#x}", selector)), + Argument::String(namespace.to_string()), + Argument::String(model.name().to_string()), + Argument::String(format!("{class_hash:#x}")), + Argument::String(format!("{contract_address:#x}")), + Argument::String(serde_json::to_string(&layout)?), + Argument::Int(packed_size as i64), + Argument::Int(unpacked_size as i64), + Argument::String(utc_dt_string_from_timestamp(block_timestamp)), + ]; + self.executor.send(QueryMessage { + statement: insert_models.to_string(), + arguments, + query_type: QueryType::RegisterModel, + }); let mut model_idx = 0_i64; self.build_register_queries_recursive( @@ -220,7 +204,6 @@ impl Sql { }, ) .await; - self.query_queue.push_publish(BrokerMessage::ModelRegistered(model_registered)); Ok(()) } @@ -260,14 +243,18 @@ impl Sql { arguments.push(Argument::String(keys.to_string())); } - self.query_queue.enqueue(insert_entities, arguments, QueryType::SetEntity(entity.clone())); + self.executor.send(QueryMessage { + statement: insert_entities.to_string(), + arguments, + query_type: QueryType::SetEntity(entity.clone()), + }); - self.query_queue.enqueue( - "INSERT INTO entity_model (entity_id, model_id) VALUES (?, ?) 
ON CONFLICT(entity_id, \ - model_id) DO NOTHING", - vec![Argument::String(entity_id.clone()), Argument::String(model_id.clone())], - QueryType::Other, - ); + self.executor.send(QueryMessage { + statement: "INSERT INTO entity_model (entity_id, model_id) VALUES (?, ?) ON CONFLICT(entity_id, \ + model_id) DO NOTHING".to_string(), + arguments: vec![Argument::String(entity_id.clone()), Argument::String(model_id.clone())], + query_type: QueryType::Other, + }); let path = vec![namespaced_name]; self.build_set_entity_queries_recursive( @@ -304,12 +291,12 @@ impl Sql { let entity_id = format!("{:#x}", poseidon_hash_many(&keys)); let model_id = format!("{:#x}", compute_selector_from_names(model_namespace, model_name)); - self.query_queue.enqueue( - "INSERT INTO event_model (entity_id, model_id) VALUES (?, ?) ON CONFLICT(entity_id, \ - model_id) DO NOTHING", - vec![Argument::String(entity_id.clone()), Argument::String(model_id.clone())], - QueryType::Other, - ); + self.executor.send(QueryMessage { + statement: "INSERT INTO event_model (entity_id, model_id) VALUES (?, ?) ON CONFLICT(entity_id, \ + model_id) DO NOTHING".to_string(), + arguments: vec![Argument::String(entity_id.clone()), Argument::String(model_id.clone())], + query_type: QueryType::Other, + }); let keys_str = felts_sql_string(&keys); let insert_entities = "INSERT INTO event_messages (id, keys, event_id, executed_at) \ @@ -336,7 +323,20 @@ impl Sql { &vec![], ); - self.query_queue.push_publish(BrokerMessage::EventMessageUpdated(event_message_updated)); + self.executor.send(QueryMessage { + statement: "INSERT INTO event_messages (id, keys, event_id, executed_at) \ + VALUES (?, ?, ?, ?) ON CONFLICT(id) DO UPDATE SET \ + updated_at=CURRENT_TIMESTAMP, executed_at=EXCLUDED.executed_at, \ + event_id=EXCLUDED.event_id RETURNING *" + .to_string(), + arguments: vec![ + Argument::String(entity_id.clone()), + Argument::String(keys_str), + Argument::String(event_id.to_string()), + Argument::String(utc_dt_string_from_timestamp(block_timestamp)), + ], + query_type: QueryType::Other, + }); Ok(()) } @@ -354,16 +354,19 @@ impl Sql { // delete entity models data self.build_delete_entity_queries_recursive(path, &entity_id, &entity); - self.query_queue.enqueue( - "DELETE FROM entity_model WHERE entity_id = ? AND model_id = ?", - vec![Argument::String(entity_id.clone()), Argument::String(format!("{:#x}", model_id))], - QueryType::DeleteEntity(DeleteEntityQuery { + self.executor.send(QueryMessage { + statement: "DELETE FROM entity_model WHERE entity_id = ? AND model_id = ?".to_string(), + arguments: vec![ + Argument::String(entity_id.clone()), + Argument::String(format!("{:#x}", model_id)), + ], + query_type: QueryType::DeleteEntity(DeleteEntityQuery { entity_id: entity_id.clone(), event_id: event_id.to_string(), block_timestamp: utc_dt_string_from_timestamp(block_timestamp), entity: entity.clone(), }), - ); + }); Ok(()) } @@ -373,13 +376,15 @@ impl Sql { let uri = Argument::String(uri.to_string()); let executed_at = Argument::String(utc_dt_string_from_timestamp(block_timestamp)); - self.query_queue.enqueue( - "INSERT INTO metadata (id, uri, executed_at) VALUES (?, ?, ?) ON CONFLICT(id) DO \ + self.executor.send(QueryMessage { + statement: + "INSERT INTO metadata (id, uri, executed_at) VALUES (?, ?, ?) 
ON CONFLICT(id) DO \ UPDATE SET id=excluded.id, executed_at=excluded.executed_at, \ - updated_at=CURRENT_TIMESTAMP", - vec![resource, uri, executed_at], - QueryType::Other, - ); + updated_at=CURRENT_TIMESTAMP" + .to_string(), + arguments: vec![resource, uri, executed_at], + query_type: QueryType::Other, + }); } pub async fn update_metadata( @@ -408,7 +413,7 @@ impl Sql { let statement = format!("UPDATE metadata SET {} WHERE id = ?", update.join(",")); arguments.push(Argument::FieldElement(*resource)); - self.query_queue.enqueue(statement, arguments, QueryType::Other); + self.executor.send(QueryMessage { statement, arguments, query_type: QueryType::Other }); Ok(()) } @@ -468,11 +473,11 @@ impl Sql { _ => return, }; - self.query_queue.enqueue( - "INSERT OR IGNORE INTO transactions (id, transaction_hash, sender_address, calldata, \ + self.executor.send(QueryMessage { + statement: "INSERT OR IGNORE INTO transactions (id, transaction_hash, sender_address, calldata, \ max_fee, signature, nonce, transaction_type, executed_at) VALUES (?, ?, ?, ?, ?, ?, \ - ?, ?, ?)", - vec![ + ?, ?, ?)".to_string(), + arguments: vec![ id, transaction_hash, sender_address, @@ -483,8 +488,8 @@ impl Sql { Argument::String(transaction_type.to_string()), Argument::String(utc_dt_string_from_timestamp(block_timestamp)), ], - QueryType::Other, - ); + query_type: QueryType::Other, + }); } pub fn store_event( @@ -500,23 +505,12 @@ impl Sql { let hash = Argument::FieldElement(transaction_hash); let executed_at = Argument::String(utc_dt_string_from_timestamp(block_timestamp)); - self.query_queue.enqueue( - "INSERT OR IGNORE INTO events (id, keys, data, transaction_hash, executed_at) VALUES \ - (?, ?, ?, ?, ?)", - vec![id, keys, data, hash, executed_at], - QueryType::Other, - ); - - let emitted = EventEmitted { - id: event_id.to_string(), - keys: felts_sql_string(&event.keys), - data: felts_sql_string(&event.data), - transaction_hash: format!("{:#x}", transaction_hash), - created_at: Utc::now(), - executed_at: must_utc_datetime_from_timestamp(block_timestamp), - }; - - self.query_queue.push_publish(BrokerMessage::EventEmitted(emitted)); + self.executor.send(QueryMessage { + statement: "INSERT OR IGNORE INTO events (id, keys, data, transaction_hash, executed_at) VALUES \ + (?, ?, ?, ?, ?)".to_string(), + arguments: vec![id.clone(), keys.clone(), data.clone(), hash.clone(), executed_at.clone()], + query_type: QueryType::StoreEvent, + }); } #[allow(clippy::too_many_arguments)] @@ -606,99 +600,100 @@ impl Sql { let (entity_id, is_event_message) = entity_id; let (entity, is_store_update_member) = entity; - let update_members = - |members: &[Member], query_queue: &mut QueryQueue, indexes: &Vec| { - let table_id = path.join("$"); - let mut columns = vec![ - "id".to_string(), - "event_id".to_string(), - "executed_at".to_string(), - "updated_at".to_string(), - if is_event_message { - "event_message_id".to_string() - } else { - "entity_id".to_string() - }, - ]; - - let mut arguments = vec![ - Argument::String(if is_event_message { - "event:".to_string() + entity_id - } else { - entity_id.to_string() - }), - Argument::String(event_id.to_string()), - Argument::String(utc_dt_string_from_timestamp(block_timestamp)), - Argument::String(chrono::Utc::now().to_rfc3339()), - Argument::String(entity_id.to_string()), - ]; + let update_members = |members: &[Member], + executor: &mut UnboundedSender, + indexes: &Vec| { + let table_id = path.join("$"); + let mut columns = vec![ + "id".to_string(), + "event_id".to_string(), + 
"executed_at".to_string(), + "updated_at".to_string(), + if is_event_message { + "event_message_id".to_string() + } else { + "entity_id".to_string() + }, + ]; - if !indexes.is_empty() { - columns.push("full_array_id".to_string()); - arguments.push(Argument::String( - std::iter::once(entity_id.to_string()) - .chain(indexes.iter().map(|i| i.to_string())) - .collect::>() - .join(FELT_DELIMITER), - )); - } + let mut arguments = vec![ + Argument::String(if is_event_message { + "event:".to_string() + entity_id + } else { + entity_id.to_string() + }), + Argument::String(event_id.to_string()), + Argument::String(utc_dt_string_from_timestamp(block_timestamp)), + Argument::String(chrono::Utc::now().to_rfc3339()), + Argument::String(entity_id.to_string()), + ]; + + if !indexes.is_empty() { + columns.push("full_array_id".to_string()); + arguments.push(Argument::String( + std::iter::once(entity_id.to_string()) + .chain(indexes.iter().map(|i| i.to_string())) + .collect::>() + .join(FELT_DELIMITER), + )); + } - for (column_idx, idx) in indexes.iter().enumerate() { - columns.push(format!("idx_{}", column_idx)); - arguments.push(Argument::Int(*idx)); - } + for (column_idx, idx) in indexes.iter().enumerate() { + columns.push(format!("idx_{}", column_idx)); + arguments.push(Argument::Int(*idx)); + } - for member in members.iter() { - match &member.ty { - Ty::Primitive(ty) => { - columns.push(format!("external_{}", &member.name)); - arguments.push(Argument::String(ty.to_sql_value().unwrap())); - } - Ty::Enum(e) => { - columns.push(format!("external_{}", &member.name)); - arguments.push(Argument::String(e.to_sql_value().unwrap())); - } - Ty::ByteArray(b) => { - columns.push(format!("external_{}", &member.name)); - arguments.push(Argument::String(b.clone())); - } - _ => {} + for member in members.iter() { + match &member.ty { + Ty::Primitive(ty) => { + columns.push(format!("external_{}", &member.name)); + arguments.push(Argument::String(ty.to_sql_value().unwrap())); } + Ty::Enum(e) => { + columns.push(format!("external_{}", &member.name)); + arguments.push(Argument::String(e.to_sql_value().unwrap())); + } + Ty::ByteArray(b) => { + columns.push(format!("external_{}", &member.name)); + arguments.push(Argument::String(b.clone())); + } + _ => {} } + } - let placeholders: Vec<&str> = arguments.iter().map(|_| "?").collect(); - let statement = if is_store_update_member && indexes.is_empty() { - arguments.push(Argument::String(if is_event_message { - "event:".to_string() + entity_id - } else { - entity_id.to_string() - })); - - // row has to exist. update it directly - format!( - "UPDATE [{table_id}] SET {updates} WHERE id = ?", - table_id = table_id, - updates = columns - .iter() - .zip(placeholders.iter()) - .map(|(column, placeholder)| format!("{} = {}", column, placeholder)) - .collect::>() - .join(", ") - ) + let placeholders: Vec<&str> = arguments.iter().map(|_| "?").collect(); + let statement = if is_store_update_member && indexes.is_empty() { + arguments.push(Argument::String(if is_event_message { + "event:".to_string() + entity_id } else { - format!( - "INSERT OR REPLACE INTO [{table_id}] ({}) VALUES ({})", - columns.join(","), - placeholders.join(",") - ) - }; - - query_queue.enqueue(statement, arguments, QueryType::Other); + entity_id.to_string() + })); + + // row has to exist. 
update it directly + format!( + "UPDATE [{table_id}] SET {updates} WHERE id = ?", + table_id = table_id, + updates = columns + .iter() + .zip(placeholders.iter()) + .map(|(column, placeholder)| format!("{} = {}", column, placeholder)) + .collect::>() + .join(", ") + ) + } else { + format!( + "INSERT OR REPLACE INTO [{table_id}] ({}) VALUES ({})", + columns.join(","), + placeholders.join(",") + ) }; + executor.send(QueryMessage { statement, arguments, query_type: QueryType::Other }); + }; + match entity { Ty::Struct(s) => { - update_members(&s.children, &mut self.query_queue, indexes); + update_members(&s.children, &mut self.executor, indexes); for member in s.children.iter() { let mut path_clone = path.clone(); @@ -716,7 +711,11 @@ impl Sql { Ty::Enum(e) => { if e.options.iter().all( |o| { - if let Ty::Tuple(t) = &o.ty { t.is_empty() } else { false } + if let Ty::Tuple(t) = &o.ty { + t.is_empty() + } else { + false + } }, ) { return; @@ -729,7 +728,7 @@ impl Sql { Member { name: "option".to_string(), ty: Ty::Enum(e.clone()), key: false }, Member { name: option.name.clone(), ty: option.ty.clone(), key: false }, ], - &mut self.query_queue, + &mut self.executor, indexes, ); @@ -761,7 +760,7 @@ impl Sql { }) .collect::>() .as_slice(), - &mut self.query_queue, + &mut self.executor, indexes, ); @@ -791,7 +790,11 @@ impl Sql { let mut arguments = vec![Argument::String(entity_id.to_string())]; arguments.extend(indexes.iter().map(|idx| Argument::Int(*idx))); - self.query_queue.enqueue(query, arguments, QueryType::Other); + self.executor.send(QueryMessage { + statement: query, + arguments, + query_type: QueryType::Other, + }); // insert the new array elements for (idx, member) in array.iter().enumerate() { @@ -800,7 +803,7 @@ impl Sql { update_members( &[Member { name: "data".to_string(), ty: member.clone(), key: false }], - &mut self.query_queue, + &mut self.executor, &indexes, ); @@ -830,11 +833,11 @@ impl Sql { Ty::Struct(s) => { let table_id = path.join("$"); let statement = format!("DELETE FROM [{table_id}] WHERE entity_id = ?"); - self.query_queue.enqueue( + self.executor.send(QueryMessage { statement, - vec![Argument::String(entity_id.to_string())], - QueryType::Other, - ); + arguments: vec![Argument::String(entity_id.to_string())], + query_type: QueryType::Other, + }); for member in s.children.iter() { let mut path_clone = path.clone(); path_clone.push(member.name.clone()); @@ -851,11 +854,11 @@ impl Sql { let table_id = path.join("$"); let statement = format!("DELETE FROM [{table_id}] WHERE entity_id = ?"); - self.query_queue.enqueue( + self.executor.send(QueryMessage { statement, - vec![Argument::String(entity_id.to_string())], - QueryType::Other, - ); + arguments: vec![Argument::String(entity_id.to_string())], + query_type: QueryType::Other, + }); for child in e.options.iter() { if let Ty::Tuple(t) = &child.ty { @@ -872,11 +875,11 @@ impl Sql { Ty::Array(array) => { let table_id = path.join("$"); let statement = format!("DELETE FROM [{table_id}] WHERE entity_id = ?"); - self.query_queue.enqueue( + self.executor.send(QueryMessage { statement, - vec![Argument::String(entity_id.to_string())], - QueryType::Other, - ); + arguments: vec![Argument::String(entity_id.to_string())], + query_type: QueryType::Other, + }); for member in array.iter() { let mut path_clone = path.clone(); @@ -887,11 +890,11 @@ impl Sql { Ty::Tuple(t) => { let table_id = path.join("$"); let statement = format!("DELETE FROM [{table_id}] WHERE entity_id = ?"); - self.query_queue.enqueue( + self.executor.send(QueryMessage { 
statement, - vec![Argument::String(entity_id.to_string())], - QueryType::Other, - ); + arguments: vec![Argument::String(entity_id.to_string())], + query_type: QueryType::Other, + }); for (idx, member) in t.iter().enumerate() { let mut path_clone = path.clone(); @@ -1005,7 +1008,11 @@ impl Sql { Argument::String(utc_dt_string_from_timestamp(block_timestamp)), ]; - self.query_queue.enqueue(statement, arguments, QueryType::Other); + self.executor.send(QueryMessage { + statement: statement.to_string(), + arguments, + query_type: QueryType::Other, + }); } } Ty::Tuple(tuple) => { @@ -1033,7 +1040,11 @@ impl Sql { Argument::String(utc_dt_string_from_timestamp(block_timestamp)), ]; - self.query_queue.enqueue(statement, arguments, QueryType::Other); + self.executor.send(QueryMessage { + statement: statement.to_string(), + arguments, + query_type: QueryType::Other, + }); } } Ty::Array(array) => { @@ -1058,7 +1069,11 @@ impl Sql { Argument::String(utc_dt_string_from_timestamp(block_timestamp)), ]; - self.query_queue.enqueue(statement, arguments, QueryType::Other); + self.executor.send(QueryMessage { + statement: statement.to_string(), + arguments, + query_type: QueryType::Other, + }); } Ty::Enum(e) => { for (idx, child) in e @@ -1097,7 +1112,11 @@ impl Sql { Argument::String(utc_dt_string_from_timestamp(block_timestamp)), ]; - self.query_queue.enqueue(statement, arguments, QueryType::Other); + self.executor.send(QueryMessage { + statement: statement.to_string(), + arguments, + query_type: QueryType::Other, + }); } } _ => {} @@ -1136,20 +1155,20 @@ impl Sql { create_table_query .push_str("FOREIGN KEY (event_message_id) REFERENCES event_messages(id));"); - self.query_queue.enqueue(create_table_query, vec![], QueryType::Other); + self.executor.send(QueryMessage { + statement: create_table_query, + arguments: vec![], + query_type: QueryType::Other, + }); indices.iter().for_each(|s| { - self.query_queue.enqueue(s, vec![], QueryType::Other); + self.executor.send(QueryMessage { + statement: s.to_string(), + arguments: vec![], + query_type: QueryType::Other, + }); }); } - - /// Execute all queries in the queue - pub async fn execute(&mut self) -> Result<()> { - debug!("Executing {} queries from the queue", self.query_queue.queue.len()); - self.query_queue.execute_all().await?; - - Ok(()) - } } pub fn felts_sql_string(felts: &[Felt]) -> String { diff --git a/crates/torii/core/src/sql_test.rs b/crates/torii/core/src/sql_test.rs index b60ea3de36..61c24cc9fa 100644 --- a/crates/torii/core/src/sql_test.rs +++ b/crates/torii/core/src/sql_test.rs @@ -20,6 +20,7 @@ use starknet_crypto::poseidon_hash_many; use tokio::sync::broadcast; use crate::engine::{Engine, EngineConfig, Processors}; +use crate::executor::{Executor, QueryMessage, QueryType}; use crate::processors::generate_event_processors_map; use crate::processors::register_model::RegisterModelProcessor; use crate::processors::store_del_record::StoreDelRecordProcessor; @@ -126,7 +127,12 @@ async fn test_load_from_remote() { let world_reader = WorldContractReader::new(strat.world_address, Arc::clone(&provider)); - let mut db = Sql::new(pool.clone(), world_reader.address).await.unwrap(); + let (mut executor, sender) = Executor::new(pool.clone()); + tokio::spawn(async move { + executor.run().await.unwrap(); + }); + + let db = Sql::new(pool.clone(), world_reader.address, sender.clone()).await.unwrap(); let _ = bootstrap_engine(world_reader, db.clone(), provider).await.unwrap(); @@ -195,7 +201,11 @@ async fn test_load_from_remote() { assert_eq!(id, format!("{:#x}", 
poseidon_hash_many(&[account.address()]))); assert_eq!(keys, format!("{:#x}/", account.address())); - db.execute().await.unwrap(); + sender.send(QueryMessage { + statement: "COMMIT".to_string(), + arguments: vec![], + query_type: QueryType::Commit, + }); } #[tokio::test(flavor = "multi_thread")] @@ -286,7 +296,12 @@ async fn test_load_from_remote_del() { let world_reader = WorldContractReader::new(strat.world_address, Arc::clone(&provider)); - let mut db = Sql::new(pool.clone(), world_reader.address).await.unwrap(); + let (mut executor, sender) = Executor::new(pool.clone()); + tokio::spawn(async move { + executor.run().await.unwrap(); + }); + + let db = Sql::new(pool.clone(), world_reader.address, sender.clone()).await.unwrap(); let _ = bootstrap_engine(world_reader, db.clone(), provider).await; @@ -297,7 +312,11 @@ async fn test_load_from_remote_del() { // TODO: check how we can have a test that is more chronological with Torii re-syncing // to ensure we can test intermediate states. - db.execute().await.unwrap(); + sender.send(QueryMessage { + statement: "COMMIT".to_string(), + arguments: vec![], + query_type: QueryType::Commit, + }); } #[tokio::test(flavor = "multi_thread")] @@ -376,11 +395,20 @@ async fn test_update_with_set_record() { let world_reader = WorldContractReader::new(strat.world_address, Arc::clone(&provider)); - let mut db = Sql::new(pool.clone(), world_reader.address).await.unwrap(); + let (mut executor, sender) = Executor::new(pool.clone()); + tokio::spawn(async move { + executor.run().await.unwrap(); + }); + + let db = Sql::new(pool.clone(), world_reader.address, sender.clone()).await.unwrap(); let _ = bootstrap_engine(world_reader, db.clone(), Arc::clone(&provider)).await.unwrap(); - db.execute().await.unwrap(); + sender.send(QueryMessage { + statement: "COMMIT".to_string(), + arguments: vec![], + query_type: QueryType::Commit, + }); } /// Count the number of rows in a table. 
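Note (illustrative aside, not part of the patch series): the sketch below shows how the pieces introduced up to PATCH 02/51 would be wired together by a consumer, based only on what is visible in these diffs — Executor::new handing back an UnboundedSender, Sql::new taking that sender, and an explicit QueryMessage flush. The wrapper function name is hypothetical, and later patches change details (e.g. PATCH 04/51 makes Executor::new async and replaces QueryType::Commit with QueryType::Execute), so treat this as a snapshot of the API at this point in the series, not the final shape.

use anyhow::Result;
use sqlx::{Pool, Sqlite};
use starknet::core::types::Felt;
use torii_core::executor::{Executor, QueryMessage, QueryType};
use torii_core::sql::Sql;

// Hypothetical wiring function, for illustration only.
async fn wire_up(pool: Pool<Sqlite>, world_address: Felt) -> Result<()> {
    // The executor owns the receiving half of the channel and applies every
    // QueryMessage inside a single SQLite transaction on its own task.
    let (mut executor, sender) = Executor::new(pool.clone());
    tokio::spawn(async move { executor.run().await.unwrap() });

    // Sql keeps only the sending half; what used to be query_queue.enqueue(..)
    // is now a QueryMessage pushed over the channel.
    let _db = Sql::new(pool.clone(), world_address, sender.clone()).await?;

    // At this point in the series the batched transaction is flushed by
    // sending an explicit Commit message (renamed to Execute in PATCH 04/51).
    sender.send(QueryMessage {
        statement: "COMMIT".to_string(),
        arguments: vec![],
        query_type: QueryType::Commit,
    })?;

    Ok(())
}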
From 6097a604f40a776f800c6c64eb8f733e700f95a5 Mon Sep 17 00:00:00 2001 From: Nasr Date: Tue, 24 Sep 2024 12:43:13 -0400 Subject: [PATCH 03/51] fix: executor --- bin/torii/src/main.rs | 14 ++- crates/torii/core/src/engine.rs | 2 +- crates/torii/core/src/sql.rs | 164 +++++++++++++++----------- crates/torii/libp2p/src/server/mod.rs | 1 - 4 files changed, 108 insertions(+), 73 deletions(-) diff --git a/bin/torii/src/main.rs b/bin/torii/src/main.rs index cf568429ce..5b7b0e4e6a 100644 --- a/bin/torii/src/main.rs +++ b/bin/torii/src/main.rs @@ -31,6 +31,7 @@ use tokio::sync::broadcast; use tokio::sync::broadcast::Sender; use tokio_stream::StreamExt; use torii_core::engine::{Engine, EngineConfig, IndexingFlags, Processors}; +use torii_core::executor::{Executor, QueryMessage, QueryType}; use torii_core::processors::event_message::EventMessageProcessor; use torii_core::processors::generate_event_processors_map; use torii_core::processors::metadata_update::MetadataUpdateProcessor; @@ -185,7 +186,12 @@ async fn main() -> anyhow::Result<()> { // Get world address let world = WorldContractReader::new(args.world_address, provider.clone()); - let db = Sql::new(pool.clone(), args.world_address).await?; + let (mut executor, sender) = Executor::new(pool.clone()); + tokio::spawn(async move { + executor.run().await.unwrap(); + }); + + let db = Sql::new(pool.clone(), args.world_address, sender.clone()).await?; let processors = Processors { event: generate_event_processors_map(vec![ @@ -228,6 +234,12 @@ async fn main() -> anyhow::Result<()> { Some(block_tx), ); + sender.send(QueryMessage { + statement: "COMMIT".to_string(), + arguments: vec![], + query_type: QueryType::Commit, + })?; + let shutdown_rx = shutdown_tx.subscribe(); let (grpc_addr, grpc_server) = torii_grpc::server::new( shutdown_rx, diff --git a/crates/torii/core/src/engine.rs b/crates/torii/core/src/engine.rs index 127dbcd165..d857280dec 100644 --- a/crates/torii/core/src/engine.rs +++ b/crates/torii/core/src/engine.rs @@ -186,7 +186,7 @@ impl Engine

{ match self.process(fetch_result).await { Ok(()) => { - self.db.executor.send(QueryMessage { + let _ = self.db.executor.send(QueryMessage { statement: "COMMIT".to_string(), arguments: vec![], query_type: QueryType::Commit, diff --git a/crates/torii/core/src/sql.rs b/crates/torii/core/src/sql.rs index 979478d41a..05a33993da 100644 --- a/crates/torii/core/src/sql.rs +++ b/crates/torii/core/src/sql.rs @@ -52,7 +52,7 @@ impl Sql { Argument::String(WORLD_CONTRACT_TYPE.to_string()), ], query_type: QueryType::Other, - }); + })?; executor.send(QueryMessage { statement: "INSERT OR IGNORE INTO contracts (id, contract_address, contract_type) VALUES (?, ?, \ @@ -63,7 +63,7 @@ impl Sql { Argument::String(WORLD_CONTRACT_TYPE.to_string()), ], query_type: QueryType::Other, - }); + })?; Ok(Self { pool: pool.clone(), @@ -90,17 +90,19 @@ impl Sql { )) } - pub fn set_head(&mut self, head: u64) { + pub fn set_head(&mut self, head: u64) -> Result<()> { let head = Argument::Int(head.try_into().expect("doesn't fit in u64")); let id = Argument::FieldElement(self.world_address); self.executor.send(QueryMessage { statement: "UPDATE contracts SET head = ? WHERE id = ?".to_string(), arguments: vec![head, id], query_type: QueryType::Other, - }); + })?; + + Ok(()) } - pub fn set_last_pending_block_world_tx(&mut self, last_pending_block_world_tx: Option) { + pub fn set_last_pending_block_world_tx(&mut self, last_pending_block_world_tx: Option) -> Result<()> { let last_pending_block_world_tx = if let Some(f) = last_pending_block_world_tx { Argument::String(format!("{:#x}", f)) } else { @@ -114,10 +116,12 @@ impl Sql { .to_string(), arguments: vec![last_pending_block_world_tx, id], query_type: QueryType::Other, - }); + })?; + + Ok(()) } - pub fn set_last_pending_block_tx(&mut self, last_pending_block_tx: Option) { + pub fn set_last_pending_block_tx(&mut self, last_pending_block_tx: Option) -> Result<()> { let last_pending_block_tx = if let Some(f) = last_pending_block_tx { Argument::String(format!("{:#x}", f)) } else { @@ -129,7 +133,9 @@ impl Sql { statement: "UPDATE contracts SET last_pending_block_tx = ? WHERE id = ?".to_string(), arguments: vec![last_pending_block_tx, id], query_type: QueryType::Other, - }); + })?; + + Ok(()) } #[allow(clippy::too_many_arguments)] @@ -169,7 +175,7 @@ impl Sql { statement: insert_models.to_string(), arguments, query_type: QueryType::RegisterModel, - }); + })?; let mut model_idx = 0_i64; self.build_register_queries_recursive( @@ -180,7 +186,7 @@ impl Sql { block_timestamp, &mut 0, &mut 0, - ); + )?; // we set the model in the cache directly // because entities might be using it before the query queue is processed @@ -247,14 +253,14 @@ impl Sql { statement: insert_entities.to_string(), arguments, query_type: QueryType::SetEntity(entity.clone()), - }); + })?; self.executor.send(QueryMessage { statement: "INSERT INTO entity_model (entity_id, model_id) VALUES (?, ?) 
ON CONFLICT(entity_id, \ model_id) DO NOTHING".to_string(), arguments: vec![Argument::String(entity_id.clone()), Argument::String(model_id.clone())], query_type: QueryType::Other, - }); + })?; let path = vec![namespaced_name]; self.build_set_entity_queries_recursive( @@ -264,7 +270,7 @@ impl Sql { (&entity, keys_str.is_none()), block_timestamp, &vec![], - ); + )?; Ok(()) } @@ -296,7 +302,7 @@ impl Sql { model_id) DO NOTHING".to_string(), arguments: vec![Argument::String(entity_id.clone()), Argument::String(model_id.clone())], query_type: QueryType::Other, - }); + })?; let keys_str = felts_sql_string(&keys); let insert_entities = "INSERT INTO event_messages (id, keys, event_id, executed_at) \ @@ -321,7 +327,7 @@ impl Sql { (&entity, false), block_timestamp, &vec![], - ); + )?; self.executor.send(QueryMessage { statement: "INSERT INTO event_messages (id, keys, event_id, executed_at) \ @@ -336,7 +342,7 @@ impl Sql { Argument::String(utc_dt_string_from_timestamp(block_timestamp)), ], query_type: QueryType::Other, - }); + })?; Ok(()) } @@ -352,7 +358,7 @@ impl Sql { let entity_id = format!("{:#x}", entity_id); let path = vec![entity.name()]; // delete entity models data - self.build_delete_entity_queries_recursive(path, &entity_id, &entity); + self.build_delete_entity_queries_recursive(path, &entity_id, &entity)?; self.executor.send(QueryMessage { statement: "DELETE FROM entity_model WHERE entity_id = ? AND model_id = ?".to_string(), @@ -366,12 +372,12 @@ impl Sql { block_timestamp: utc_dt_string_from_timestamp(block_timestamp), entity: entity.clone(), }), - }); + })?; Ok(()) } - pub fn set_metadata(&mut self, resource: &Felt, uri: &str, block_timestamp: u64) { + pub fn set_metadata(&mut self, resource: &Felt, uri: &str, block_timestamp: u64) -> Result<()> { let resource = Argument::FieldElement(*resource); let uri = Argument::String(uri.to_string()); let executed_at = Argument::String(utc_dt_string_from_timestamp(block_timestamp)); @@ -384,7 +390,9 @@ impl Sql { .to_string(), arguments: vec![resource, uri, executed_at], query_type: QueryType::Other, - }); + })?; + + Ok(()) } pub async fn update_metadata( @@ -413,7 +421,7 @@ impl Sql { let statement = format!("UPDATE metadata SET {} WHERE id = ?", update.join(",")); arguments.push(Argument::FieldElement(*resource)); - self.executor.send(QueryMessage { statement, arguments, query_type: QueryType::Other }); + self.executor.send(QueryMessage { statement, arguments, query_type: QueryType::Other })?; Ok(()) } @@ -443,13 +451,13 @@ impl Sql { transaction: &Transaction, transaction_id: &str, block_timestamp: u64, - ) { + ) -> Result<()> { let id = Argument::String(transaction_id.to_string()); let transaction_type = match transaction { Transaction::Invoke(_) => "INVOKE", Transaction::L1Handler(_) => "L1_HANDLER", - _ => return, + _ => return Ok(()), }; let (transaction_hash, sender_address, calldata, max_fee, signature, nonce) = @@ -470,7 +478,7 @@ impl Sql { Argument::String("".to_string()), // has no signature Argument::FieldElement((l1_handler_transaction.nonce).into()), ), - _ => return, + _ => return Ok(()), }; self.executor.send(QueryMessage { @@ -489,7 +497,9 @@ impl Sql { Argument::String(utc_dt_string_from_timestamp(block_timestamp)), ], query_type: QueryType::Other, - }); + })?; + + Ok(()) } pub fn store_event( @@ -498,7 +508,7 @@ impl Sql { event: &Event, transaction_hash: Felt, block_timestamp: u64, - ) { + ) -> Result<()> { let id = Argument::String(event_id.to_string()); let keys = Argument::String(felts_sql_string(&event.keys)); let data 
= Argument::String(felts_sql_string(&event.data)); @@ -510,7 +520,9 @@ impl Sql { (?, ?, ?, ?, ?)".to_string(), arguments: vec![id.clone(), keys.clone(), data.clone(), hash.clone(), executed_at.clone()], query_type: QueryType::StoreEvent, - }); + })?; + + Ok(()) } #[allow(clippy::too_many_arguments)] @@ -523,11 +535,11 @@ impl Sql { block_timestamp: u64, array_idx: &mut usize, parent_array_idx: &mut usize, - ) { + ) -> Result<()> { if let Ty::Enum(e) = model { if e.options.iter().all(|o| if let Ty::Tuple(t) = &o.ty { t.is_empty() } else { false }) { - return; + return Ok(()); } } @@ -539,13 +551,13 @@ impl Sql { block_timestamp, *array_idx, *parent_array_idx, - ); + )?; - let mut build_member = |pathname: &str, member: &Ty| { + let mut build_member = |pathname: &str, member: &Ty| -> Result<()> { if let Ty::Primitive(_) = member { - return; + return Ok(()); } else if let Ty::ByteArray(_) = member { - return; + return Ok(()); } let mut path_clone = path.clone(); @@ -559,20 +571,22 @@ impl Sql { block_timestamp, &mut (*array_idx + if let Ty::Array(_) = member { 1 } else { 0 }), &mut (*parent_array_idx + if let Ty::Array(_) = model { 1 } else { 0 }), - ); + )?; + + Ok(()) }; if let Ty::Struct(s) = model { for member in s.children.iter() { - build_member(&member.name, &member.ty); + build_member(&member.name, &member.ty)?; } } else if let Ty::Tuple(t) = model { for (idx, member) in t.iter().enumerate() { - build_member(format!("_{}", idx).as_str(), member); + build_member(format!("_{}", idx).as_str(), member)?; } } else if let Ty::Array(array) = model { let ty = &array[0]; - build_member("data", ty); + build_member("data", ty)?; } else if let Ty::Enum(e) = model { for child in e.options.iter() { // Skip enum options that have no type / member @@ -582,9 +596,11 @@ impl Sql { } } - build_member(&child.name, &child.ty); + build_member(&child.name, &child.ty)?; } } + + Ok(()) } fn build_set_entity_queries_recursive( @@ -596,13 +612,13 @@ impl Sql { entity: (&Ty, IsStoreUpdate), block_timestamp: u64, indexes: &Vec, - ) { + ) -> Result<()> { let (entity_id, is_event_message) = entity_id; let (entity, is_store_update_member) = entity; let update_members = |members: &[Member], executor: &mut UnboundedSender, - indexes: &Vec| { + indexes: &Vec| -> Result<()> { let table_id = path.join("$"); let mut columns = vec![ "id".to_string(), @@ -688,12 +704,14 @@ impl Sql { ) }; - executor.send(QueryMessage { statement, arguments, query_type: QueryType::Other }); + executor.send(QueryMessage { statement, arguments, query_type: QueryType::Other })?; + + Ok(()) }; match entity { Ty::Struct(s) => { - update_members(&s.children, &mut self.executor, indexes); + update_members(&s.children, &mut self.executor, indexes)?; for member in s.children.iter() { let mut path_clone = path.clone(); @@ -705,7 +723,7 @@ impl Sql { (&member.ty, is_store_update_member), block_timestamp, indexes, - ); + )?; } } Ty::Enum(e) => { @@ -718,7 +736,7 @@ impl Sql { } }, ) { - return; + return Ok(()); } let option = e.options[e.option.unwrap() as usize].clone(); @@ -730,7 +748,7 @@ impl Sql { ], &mut self.executor, indexes, - ); + )?; match &option.ty { // Skip enum options that have no type / member @@ -745,7 +763,7 @@ impl Sql { (&option.ty, is_store_update_member), block_timestamp, indexes, - ); + )?; } } } @@ -762,7 +780,7 @@ impl Sql { .as_slice(), &mut self.executor, indexes, - ); + )?; for (idx, member) in t.iter().enumerate() { let mut path_clone = path.clone(); @@ -774,7 +792,7 @@ impl Sql { (member, is_store_update_member), 
block_timestamp, indexes, - ); + )?; } } Ty::Array(array) => { @@ -794,7 +812,7 @@ impl Sql { statement: query, arguments, query_type: QueryType::Other, - }); + })?; // insert the new array elements for (idx, member) in array.iter().enumerate() { @@ -805,7 +823,7 @@ impl Sql { &[Member { name: "data".to_string(), ty: member.clone(), key: false }], &mut self.executor, &indexes, - ); + )?; let mut path_clone = path.clone(); path_clone.push("data".to_string()); @@ -816,11 +834,13 @@ impl Sql { (member, is_store_update_member), block_timestamp, &indexes, - ); + )?; } } _ => {} } + + Ok(()) } fn build_delete_entity_queries_recursive( @@ -828,7 +848,7 @@ impl Sql { path: Vec, entity_id: &str, entity: &Ty, - ) { + ) -> Result<()> { match entity { Ty::Struct(s) => { let table_id = path.join("$"); @@ -837,11 +857,11 @@ impl Sql { statement, arguments: vec![Argument::String(entity_id.to_string())], query_type: QueryType::Other, - }); + })?; for member in s.children.iter() { let mut path_clone = path.clone(); path_clone.push(member.name.clone()); - self.build_delete_entity_queries_recursive(path_clone, entity_id, &member.ty); + self.build_delete_entity_queries_recursive(path_clone, entity_id, &member.ty)?; } } Ty::Enum(e) => { @@ -849,7 +869,7 @@ impl Sql { .iter() .all(|o| if let Ty::Tuple(t) = &o.ty { t.is_empty() } else { false }) { - return; + return Ok(()); } let table_id = path.join("$"); @@ -858,7 +878,7 @@ impl Sql { statement, arguments: vec![Argument::String(entity_id.to_string())], query_type: QueryType::Other, - }); + })?; for child in e.options.iter() { if let Ty::Tuple(t) = &child.ty { @@ -869,7 +889,7 @@ impl Sql { let mut path_clone = path.clone(); path_clone.push(child.name.clone()); - self.build_delete_entity_queries_recursive(path_clone, entity_id, &child.ty); + self.build_delete_entity_queries_recursive(path_clone, entity_id, &child.ty)?; } } Ty::Array(array) => { @@ -879,12 +899,12 @@ impl Sql { statement, arguments: vec![Argument::String(entity_id.to_string())], query_type: QueryType::Other, - }); + })?; for member in array.iter() { let mut path_clone = path.clone(); path_clone.push("data".to_string()); - self.build_delete_entity_queries_recursive(path_clone, entity_id, member); + self.build_delete_entity_queries_recursive(path_clone, entity_id, member)?; } } Ty::Tuple(t) => { @@ -894,16 +914,18 @@ impl Sql { statement, arguments: vec![Argument::String(entity_id.to_string())], query_type: QueryType::Other, - }); + })?; for (idx, member) in t.iter().enumerate() { let mut path_clone = path.clone(); path_clone.push(format!("_{}", idx)); - self.build_delete_entity_queries_recursive(path_clone, entity_id, member); + self.build_delete_entity_queries_recursive(path_clone, entity_id, member)?; } } _ => {} } + + Ok(()) } #[allow(clippy::too_many_arguments)] @@ -916,7 +938,7 @@ impl Sql { block_timestamp: u64, array_idx: usize, parent_array_idx: usize, - ) { + ) -> Result<()> { let table_id = path.join("$"); let mut indices = Vec::new(); @@ -1012,7 +1034,7 @@ impl Sql { statement: statement.to_string(), arguments, query_type: QueryType::Other, - }); + })?; } } Ty::Tuple(tuple) => { @@ -1044,7 +1066,7 @@ impl Sql { statement: statement.to_string(), arguments, query_type: QueryType::Other, - }); + })?; } } Ty::Array(array) => { @@ -1073,7 +1095,7 @@ impl Sql { statement: statement.to_string(), arguments, query_type: QueryType::Other, - }); + })?; } Ty::Enum(e) => { for (idx, child) in e @@ -1116,7 +1138,7 @@ impl Sql { statement: statement.to_string(), arguments, query_type: 
QueryType::Other, - }); + })?; } } _ => {} @@ -1159,15 +1181,17 @@ impl Sql { statement: create_table_query, arguments: vec![], query_type: QueryType::Other, - }); + })?; - indices.iter().for_each(|s| { + for s in indices.iter() { self.executor.send(QueryMessage { statement: s.to_string(), arguments: vec![], query_type: QueryType::Other, - }); - }); + })?; + } + + Ok(()) } } diff --git a/crates/torii/libp2p/src/server/mod.rs b/crates/torii/libp2p/src/server/mod.rs index 9bc1e25ce3..e69776c865 100644 --- a/crates/torii/libp2p/src/server/mod.rs +++ b/crates/torii/libp2p/src/server/mod.rs @@ -529,7 +529,6 @@ async fn set_entity( keys: &str, ) -> anyhow::Result<()> { db.set_entity(ty, message_id, block_timestamp, entity_id, model_id, Some(keys)).await?; - db.execute().await?; Ok(()) } From 043f669c9a405c05f36a3928dcee27fcd83fb5ff Mon Sep 17 00:00:00 2001 From: Nasr Date: Wed, 25 Sep 2024 10:31:26 -0400 Subject: [PATCH 04/51] refactor: executor logic --- bin/torii/src/main.rs | 8 +--- crates/torii/core/src/engine.rs | 18 +++------ crates/torii/core/src/executor.rs | 63 ++++++++++++++++++------------- crates/torii/core/src/sql.rs | 6 +++ crates/torii/core/src/sql_test.rs | 32 +++++----------- 5 files changed, 59 insertions(+), 68 deletions(-) diff --git a/bin/torii/src/main.rs b/bin/torii/src/main.rs index 5b7b0e4e6a..8a83ecb7f8 100644 --- a/bin/torii/src/main.rs +++ b/bin/torii/src/main.rs @@ -186,7 +186,7 @@ async fn main() -> anyhow::Result<()> { // Get world address let world = WorldContractReader::new(args.world_address, provider.clone()); - let (mut executor, sender) = Executor::new(pool.clone()); + let (mut executor, sender) = Executor::new(pool.clone()).await?; tokio::spawn(async move { executor.run().await.unwrap(); }); @@ -234,12 +234,6 @@ async fn main() -> anyhow::Result<()> { Some(block_tx), ); - sender.send(QueryMessage { - statement: "COMMIT".to_string(), - arguments: vec![], - query_type: QueryType::Commit, - })?; - let shutdown_rx = shutdown_tx.subscribe(); let (grpc_addr, grpc_server) = torii_grpc::server::new( shutdown_rx, diff --git a/crates/torii/core/src/engine.rs b/crates/torii/core/src/engine.rs index d857280dec..66b003dc4f 100644 --- a/crates/torii/core/src/engine.rs +++ b/crates/torii/core/src/engine.rs @@ -152,7 +152,7 @@ impl Engine

{ // use the start block provided by user if head is 0 let (head, _, _) = self.db.head().await?; if head == 0 { - self.db.set_head(self.config.start_block); + self.db.set_head(self.config.start_block)?; } else if self.config.start_block != 0 { warn!(target: LOG_TARGET, "Start block ignored, stored head exists and will be used instead."); } @@ -178,18 +178,12 @@ impl Engine

{ info!(target: LOG_TARGET, "Syncing reestablished."); } - let (mut executor, sender) = Executor::new(self.db.pool.clone()); - tokio::spawn(async move { - executor.run().await; - }); - self.db.executor = sender; - match self.process(fetch_result).await { Ok(()) => { let _ = self.db.executor.send(QueryMessage { - statement: "COMMIT".to_string(), + statement: "".to_string(), arguments: vec![], - query_type: QueryType::Commit, + query_type: QueryType::Execute, }); } Err(e) => { @@ -497,9 +491,9 @@ impl Engine

{ // Process parallelized events self.process_tasks().await?; - self.db.set_head(data.latest_block_number); - self.db.set_last_pending_block_world_tx(None); - self.db.set_last_pending_block_tx(None); + self.db.set_head(data.latest_block_number)?; + self.db.set_last_pending_block_world_tx(None)?; + self.db.set_last_pending_block_tx(None)?; Ok(()) } diff --git a/crates/torii/core/src/executor.rs b/crates/torii/core/src/executor.rs index e122f807e5..db825d63e2 100644 --- a/crates/torii/core/src/executor.rs +++ b/crates/torii/core/src/executor.rs @@ -1,10 +1,11 @@ use std::collections::VecDeque; -use std::sync::Arc; -use tokio::sync::mpsc::{unbounded_channel, Receiver, Sender, UnboundedReceiver, UnboundedSender}; +use std::mem; + use anyhow::{Context, Result}; use dojo_types::schema::Ty; -use sqlx::{FromRow, Pool, Sqlite}; +use sqlx::{FromRow, Pool, Sqlite, Transaction}; use starknet::core::types::Felt; +use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}; use crate::simple_broker::SimpleBroker; use crate::types::{ @@ -43,12 +44,14 @@ pub enum QueryType { DeleteEntity(DeleteEntityQuery), RegisterModel, StoreEvent, - Commit, + Execute, Other, } -pub struct Executor { +pub struct Executor<'c> { pool: Pool, + transaction: Transaction<'c, Sqlite>, + publish_queue: Vec, rx: UnboundedReceiver, } @@ -58,17 +61,18 @@ pub struct QueryMessage { pub query_type: QueryType, } -impl Executor { - pub fn new(pool: Pool) -> (Self, UnboundedSender) { +impl<'c> Executor<'c> { + pub async fn new(pool: Pool) -> Result<(Self, UnboundedSender)> { let (tx, rx) = unbounded_channel(); - (Executor { pool, rx }, tx) + let transaction = pool.begin().await?; + let publish_queue = Vec::new(); + + Ok((Executor { pool, transaction, publish_queue, rx }, tx)) } pub async fn run(&mut self) -> Result<()> { - let mut tx = self.pool.begin().await?; - let mut publish_queue = Vec::new(); - while let Some(msg) = self.rx.recv().await { + let tx = &mut self.transaction; let QueryMessage { statement, arguments, query_type } = msg; let mut query = sqlx::query(&statement); @@ -84,17 +88,17 @@ impl Executor { match query_type { QueryType::SetEntity(entity) => { - let row = query.fetch_one(&mut *tx).await.with_context(|| { + let row = query.fetch_one(&mut **tx).await.with_context(|| { format!("Failed to execute query: {:?}, args: {:?}", statement, arguments) })?; let mut entity_updated = EntityUpdated::from_row(&row)?; entity_updated.updated_model = Some(entity); entity_updated.deleted = false; let broker_message = BrokerMessage::EntityUpdated(entity_updated); - publish_queue.push(broker_message); + self.publish_queue.push(broker_message); } QueryType::DeleteEntity(entity) => { - let delete_model = query.execute(&mut *tx).await.with_context(|| { + let delete_model = query.execute(&mut **tx).await.with_context(|| { format!("Failed to execute query: {:?}, args: {:?}", statement, arguments) })?; if delete_model.rows_affected() == 0 { @@ -108,7 +112,7 @@ impl Executor { .bind(entity.block_timestamp) .bind(entity.event_id) .bind(entity.entity_id) - .fetch_one(&mut *tx) + .fetch_one(&mut **tx) .await?; let mut entity_updated = EntityUpdated::from_row(&row)?; entity_updated.updated_model = Some(entity.entity); @@ -117,51 +121,56 @@ impl Executor { "SELECT count(*) FROM entity_model WHERE entity_id = ?", ) .bind(entity_updated.id.clone()) - .fetch_one(&mut *tx) + .fetch_one(&mut **tx) .await?; // Delete entity if all of its models are deleted if count == 0 { sqlx::query("DELETE FROM entities WHERE id = ?") 
.bind(entity_updated.id.clone()) - .execute(&mut *tx) + .execute(&mut **tx) .await?; entity_updated.deleted = true; } let broker_message = BrokerMessage::EntityUpdated(entity_updated); - publish_queue.push(broker_message); + self.publish_queue.push(broker_message); } QueryType::RegisterModel => { - let row = query.fetch_one(&mut *tx).await.with_context(|| { + let row = query.fetch_one(&mut **tx).await.with_context(|| { format!("Failed to execute query: {:?}, args: {:?}", statement, arguments) })?; let model_registered = ModelRegistered::from_row(&row)?; let broker_message = BrokerMessage::ModelRegistered(model_registered); - publish_queue.push(broker_message); + self.publish_queue.push(broker_message); } QueryType::StoreEvent => { - let row = query.fetch_one(&mut *tx).await.with_context(|| { + let row = query.fetch_one(&mut **tx).await.with_context(|| { format!("Failed to execute query: {:?}, args: {:?}", statement, arguments) })?; let event = EventEmitted::from_row(&row)?; let broker_message = BrokerMessage::EventEmitted(event); - publish_queue.push(broker_message); + self.publish_queue.push(broker_message); } - QueryType::Commit => { - break; + QueryType::Execute => { + self.execute().await?; } QueryType::Other => { - query.execute(&mut *tx).await.with_context(|| { + query.execute(&mut **tx).await.with_context(|| { format!("Failed to execute query: {:?}, args: {:?}", statement, arguments) })?; } } } - tx.commit().await?; + Ok(()) + } + + pub async fn execute(&mut self) -> Result<()> { + let transaction = mem::replace(&mut self.transaction, self.pool.begin().await?); + transaction.commit().await?; - for message in publish_queue { + for message in self.publish_queue.drain(..) { send_broker_message(message); } diff --git a/crates/torii/core/src/sql.rs b/crates/torii/core/src/sql.rs index 05a33993da..cd79f0c871 100644 --- a/crates/torii/core/src/sql.rs +++ b/crates/torii/core/src/sql.rs @@ -65,6 +65,12 @@ impl Sql { query_type: QueryType::Other, })?; + executor.send(QueryMessage { + statement: "".to_string(), + arguments: vec![], + query_type: QueryType::Execute, + })?; + Ok(Self { pool: pool.clone(), world_address, diff --git a/crates/torii/core/src/sql_test.rs b/crates/torii/core/src/sql_test.rs index 61c24cc9fa..a6820d2846 100644 --- a/crates/torii/core/src/sql_test.rs +++ b/crates/torii/core/src/sql_test.rs @@ -41,7 +41,7 @@ where let to = provider.block_hash_and_number().await?.block_number; let mut engine = Engine::new( world, - db, + db.clone(), provider, Processors { event: generate_event_processors_map(vec![ @@ -61,6 +61,12 @@ where let data = engine.fetch_range(0, to, None).await.unwrap(); engine.process_range(data).await.unwrap(); + db.executor.send(QueryMessage { + statement: "".to_string(), + arguments: vec![], + query_type: QueryType::Execute, + }); + Ok(engine) } @@ -127,7 +133,7 @@ async fn test_load_from_remote() { let world_reader = WorldContractReader::new(strat.world_address, Arc::clone(&provider)); - let (mut executor, sender) = Executor::new(pool.clone()); + let (mut executor, sender) = Executor::new(pool.clone()).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); @@ -200,12 +206,6 @@ async fn test_load_from_remote() { assert_eq!(id, format!("{:#x}", poseidon_hash_many(&[account.address()]))); assert_eq!(keys, format!("{:#x}/", account.address())); - - sender.send(QueryMessage { - statement: "COMMIT".to_string(), - arguments: vec![], - query_type: QueryType::Commit, - }); } #[tokio::test(flavor = "multi_thread")] @@ -296,7 +296,7 @@ async fn 
test_load_from_remote_del() { let world_reader = WorldContractReader::new(strat.world_address, Arc::clone(&provider)); - let (mut executor, sender) = Executor::new(pool.clone()); + let (mut executor, sender) = Executor::new(pool.clone()).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); @@ -311,12 +311,6 @@ async fn test_load_from_remote_del() { // TODO: check how we can have a test that is more chronological with Torii re-syncing // to ensure we can test intermediate states. - - sender.send(QueryMessage { - statement: "COMMIT".to_string(), - arguments: vec![], - query_type: QueryType::Commit, - }); } #[tokio::test(flavor = "multi_thread")] @@ -395,7 +389,7 @@ async fn test_update_with_set_record() { let world_reader = WorldContractReader::new(strat.world_address, Arc::clone(&provider)); - let (mut executor, sender) = Executor::new(pool.clone()); + let (mut executor, sender) = Executor::new(pool.clone()).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); @@ -403,12 +397,6 @@ async fn test_update_with_set_record() { let db = Sql::new(pool.clone(), world_reader.address, sender.clone()).await.unwrap(); let _ = bootstrap_engine(world_reader, db.clone(), Arc::clone(&provider)).await.unwrap(); - - sender.send(QueryMessage { - statement: "COMMIT".to_string(), - arguments: vec![], - query_type: QueryType::Commit, - }); } /// Count the number of rows in a table. From 93144387b43c45b4a8f8f4b73a9d9447dd34c4dc Mon Sep 17 00:00:00 2001 From: Nasr Date: Wed, 25 Sep 2024 11:59:42 -0400 Subject: [PATCH 05/51] fix: tests --- bin/torii/src/main.rs | 2 +- crates/torii/core/src/engine.rs | 16 +++---- .../core/src/processors/metadata_update.rs | 3 +- .../core/src/processors/store_transaction.rs | 2 +- crates/torii/core/src/sql.rs | 13 +++++- crates/torii/core/src/sql_test.rs | 2 +- .../torii/graphql/src/tests/metadata_test.rs | 22 +++++++--- crates/torii/graphql/src/tests/mod.rs | 13 ++++-- .../graphql/src/tests/subscription_test.rs | 43 ++++++++++++++----- .../grpc/src/server/tests/entities_test.rs | 7 ++- crates/torii/libp2p/src/tests.rs | 9 +++- 11 files changed, 92 insertions(+), 40 deletions(-) diff --git a/bin/torii/src/main.rs b/bin/torii/src/main.rs index 8a83ecb7f8..12bafbb0a7 100644 --- a/bin/torii/src/main.rs +++ b/bin/torii/src/main.rs @@ -31,7 +31,7 @@ use tokio::sync::broadcast; use tokio::sync::broadcast::Sender; use tokio_stream::StreamExt; use torii_core::engine::{Engine, EngineConfig, IndexingFlags, Processors}; -use torii_core::executor::{Executor, QueryMessage, QueryType}; +use torii_core::executor::Executor; use torii_core::processors::event_message::EventMessageProcessor; use torii_core::processors::generate_event_processors_map; use torii_core::processors::metadata_update::MetadataUpdateProcessor; diff --git a/crates/torii/core/src/engine.rs b/crates/torii/core/src/engine.rs index f11379a036..3a3b327f04 100644 --- a/crates/torii/core/src/engine.rs +++ b/crates/torii/core/src/engine.rs @@ -21,7 +21,7 @@ use tokio::task::JoinSet; use tokio::time::{sleep, Instant}; use tracing::{debug, error, info, trace, warn}; -use crate::executor::{Executor, QueryMessage, QueryType}; +use crate::executor::{QueryMessage, QueryType}; use crate::processors::event_message::EventMessageProcessor; use crate::processors::{BlockProcessor, EventProcessor, TransactionProcessor}; use crate::sql::Sql; @@ -414,13 +414,13 @@ impl Engine

{ // provider. So we can fail silently and try // again in the next iteration. warn!(target: LOG_TARGET, transaction_hash = %format!("{:#x}", transaction_hash), "Retrieving pending transaction receipt."); - self.db.set_head(data.block_number - 1); + self.db.set_head(data.block_number - 1)?; if let Some(tx) = last_pending_block_tx { - self.db.set_last_pending_block_tx(Some(tx)); + self.db.set_last_pending_block_tx(Some(tx))?; } if let Some(tx) = last_pending_block_world_tx { - self.db.set_last_pending_block_world_tx(Some(tx)); + self.db.set_last_pending_block_world_tx(Some(tx))?; } return Ok(()); } @@ -447,14 +447,14 @@ impl Engine

{ // Set the head to the last processed pending transaction // Head block number should still be latest block number - self.db.set_head(data.block_number - 1); + self.db.set_head(data.block_number - 1)?; if let Some(tx) = last_pending_block_tx { - self.db.set_last_pending_block_tx(Some(tx)); + self.db.set_last_pending_block_tx(Some(tx))?; } if let Some(tx) = last_pending_block_world_tx { - self.db.set_last_pending_block_world_tx(Some(tx)); + self.db.set_last_pending_block_world_tx(Some(tx))?; } Ok(()) @@ -683,7 +683,7 @@ impl Engine

{ transaction_hash: Felt, ) -> Result<()> { if self.config.flags.contains(IndexingFlags::RAW_EVENTS) { - self.db.store_event(event_id, event, transaction_hash, block_timestamp); + self.db.store_event(event_id, event, transaction_hash, block_timestamp)?; } let event_key = event.keys[0]; diff --git a/crates/torii/core/src/processors/metadata_update.rs b/crates/torii/core/src/processors/metadata_update.rs index 594a32898a..6a02995b6c 100644 --- a/crates/torii/core/src/processors/metadata_update.rs +++ b/crates/torii/core/src/processors/metadata_update.rs @@ -64,7 +64,7 @@ where uri = %uri_str, "Resource metadata set." ); - db.set_metadata(resource, &uri_str, block_timestamp); + db.set_metadata(resource, &uri_str, block_timestamp)?; let db = db.clone(); let resource = *resource; @@ -84,7 +84,6 @@ async fn try_retrieve(mut db: Sql, resource: Felt, uri_str: String) { match metadata(uri_str.clone()).await { Ok((metadata, icon_img, cover_img)) => { db.update_metadata(&resource, &uri_str, &metadata, &icon_img, &cover_img) - .await .unwrap(); info!( target: LOG_TARGET, diff --git a/crates/torii/core/src/processors/store_transaction.rs b/crates/torii/core/src/processors/store_transaction.rs index 2e7056e401..101fb88093 100644 --- a/crates/torii/core/src/processors/store_transaction.rs +++ b/crates/torii/core/src/processors/store_transaction.rs @@ -21,7 +21,7 @@ impl TransactionProcessor

for StoreTran transaction: &Transaction, ) -> Result<(), Error> { let transaction_id = format!("{:#064x}:{:#x}", block_number, transaction_hash); - db.store_transaction(transaction, &transaction_id, block_timestamp); + db.store_transaction(transaction, &transaction_id, block_timestamp)?; Ok(()) } } diff --git a/crates/torii/core/src/sql.rs b/crates/torii/core/src/sql.rs index 95503666bf..06feee2234 100644 --- a/crates/torii/core/src/sql.rs +++ b/crates/torii/core/src/sql.rs @@ -16,7 +16,6 @@ use tokio::sync::mpsc::UnboundedSender; use crate::cache::{Model, ModelCache}; use crate::executor::{Argument, DeleteEntityQuery, QueryMessage, QueryType}; -use crate::types::EventMessage as EventMessageUpdated; use crate::utils::utc_dt_string_from_timestamp; type IsEventMessage = bool; @@ -386,7 +385,7 @@ impl Sql { Ok(()) } - pub async fn update_metadata( + pub fn update_metadata( &mut self, resource: &Felt, uri: &str, @@ -1184,6 +1183,16 @@ impl Sql { Ok(()) } + + pub fn execute(&self) -> Result<()> { + self.executor.send(QueryMessage { + statement: "".to_string(), + arguments: vec![], + query_type: QueryType::Execute, + })?; + + Ok(()) + } } pub fn felts_sql_string(felts: &[Felt]) -> String { diff --git a/crates/torii/core/src/sql_test.rs b/crates/torii/core/src/sql_test.rs index 59cf4a5357..0e2dceead0 100644 --- a/crates/torii/core/src/sql_test.rs +++ b/crates/torii/core/src/sql_test.rs @@ -65,7 +65,7 @@ where statement: "".to_string(), arguments: vec![], query_type: QueryType::Execute, - }); + })?; Ok(engine) } diff --git a/crates/torii/graphql/src/tests/metadata_test.rs b/crates/torii/graphql/src/tests/metadata_test.rs index 53ff0367ff..ef4a44a240 100644 --- a/crates/torii/graphql/src/tests/metadata_test.rs +++ b/crates/torii/graphql/src/tests/metadata_test.rs @@ -4,6 +4,7 @@ mod tests { use dojo_world::metadata::WorldMetadata; use sqlx::SqlitePool; use starknet::core::types::Felt; + use torii_core::executor::Executor; use torii_core::sql::Sql; use crate::schema::build_schema; @@ -48,7 +49,11 @@ mod tests { #[sqlx::test(migrations = "../migrations")] async fn test_metadata(pool: SqlitePool) { - let mut db = Sql::new(pool.clone(), Felt::ZERO).await.unwrap(); + let (mut executor, sender) = Executor::new(pool.clone()).await.unwrap(); + tokio::spawn(async move { + executor.run().await.unwrap(); + }); + let mut db = Sql::new(pool.clone(), Felt::ZERO, sender).await.unwrap(); let schema = build_schema(&pool).await.unwrap(); let cover_img = "QWxsIHlvdXIgYmFzZSBiZWxvbmcgdG8gdXM="; @@ -70,11 +75,10 @@ mod tests { // TODO: we may want to store here the namespace and the seed. Check the // implementation to actually add those to the metadata table. 
let world_metadata: WorldMetadata = profile_config.world.into(); - db.set_metadata(&RESOURCE, URI, BLOCK_TIMESTAMP); + db.set_metadata(&RESOURCE, URI, BLOCK_TIMESTAMP).unwrap(); db.update_metadata(&RESOURCE, URI, &world_metadata, &None, &Some(cover_img.to_string())) - .await .unwrap(); - db.execute().await.unwrap(); + db.execute().unwrap(); let result = run_graphql_query(&schema, QUERY).await; let value = result.get("metadatas").ok_or("metadatas not found").unwrap().clone(); @@ -101,11 +105,15 @@ mod tests { #[sqlx::test(migrations = "../migrations")] async fn test_empty_content(pool: SqlitePool) { - let mut db = Sql::new(pool.clone(), Felt::ZERO).await.unwrap(); + let (mut executor, sender) = Executor::new(pool.clone()).await.unwrap(); + tokio::spawn(async move { + executor.run().await.unwrap(); + }); + let mut db = Sql::new(pool.clone(), Felt::ZERO, sender).await.unwrap(); let schema = build_schema(&pool).await.unwrap(); - db.set_metadata(&RESOURCE, URI, BLOCK_TIMESTAMP); - db.execute().await.unwrap(); + db.set_metadata(&RESOURCE, URI, BLOCK_TIMESTAMP).unwrap(); + db.execute().unwrap(); let result = run_graphql_query(&schema, QUERY).await; let value = result.get("metadatas").ok_or("metadatas not found").unwrap().clone(); diff --git a/crates/torii/graphql/src/tests/mod.rs b/crates/torii/graphql/src/tests/mod.rs index 133b46075e..efdcdee8b4 100644 --- a/crates/torii/graphql/src/tests/mod.rs +++ b/crates/torii/graphql/src/tests/mod.rs @@ -27,6 +27,7 @@ use starknet::providers::{JsonRpcClient, Provider}; use tokio::sync::broadcast; use tokio_stream::StreamExt; use torii_core::engine::{Engine, EngineConfig, Processors}; +use torii_core::executor::Executor; use torii_core::processors::generate_event_processors_map; use torii_core::processors::register_model::RegisterModelProcessor; use torii_core::processors::store_del_record::StoreDelRecordProcessor; @@ -271,7 +272,7 @@ pub async fn model_fixtures(db: &mut Sql) { .await .unwrap(); - db.execute().await.unwrap(); + db.execute().unwrap(); } pub async fn spinup_types_test() -> Result { @@ -350,12 +351,16 @@ pub async fn spinup_types_test() -> Result { let world = WorldContractReader::new(strat.world_address, Arc::clone(&provider)); - let db = Sql::new(pool.clone(), strat.world_address).await.unwrap(); + let (mut executor, sender) = Executor::new(pool.clone()).await.unwrap(); + tokio::spawn(async move { + executor.run().await.unwrap(); + }); + let db = Sql::new(pool.clone(), strat.world_address, sender).await.unwrap(); let (shutdown_tx, _) = broadcast::channel(1); let mut engine = Engine::new( world, - db, + db.clone(), Arc::clone(&provider), Processors { event: generate_event_processors_map(vec![ @@ -374,6 +379,6 @@ pub async fn spinup_types_test() -> Result { let to = account.provider().block_hash_and_number().await?.block_number; let data = engine.fetch_range(0, to, None).await.unwrap(); engine.process_range(data).await.unwrap(); - + db.execute().unwrap(); Ok(pool) } diff --git a/crates/torii/graphql/src/tests/subscription_test.rs b/crates/torii/graphql/src/tests/subscription_test.rs index 363082878a..bc8f506bc5 100644 --- a/crates/torii/graphql/src/tests/subscription_test.rs +++ b/crates/torii/graphql/src/tests/subscription_test.rs @@ -13,6 +13,7 @@ mod tests { use starknet::core::types::Event; use starknet_crypto::{poseidon_hash_many, Felt}; use tokio::sync::mpsc; + use torii_core::executor::Executor; use torii_core::sql::{felts_sql_string, Sql}; use crate::tests::{model_fixtures, run_graphql_subscription}; @@ -21,7 +22,11 @@ mod tests { 
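Every test touched here now wires the database the same way: spawn the Executor, hand its channel sender to Sql, queue writes, then flush them with execute(). A minimal sketch of that pattern as of this patch (the helper name, pool, resource felt and URI below are placeholders; only the Executor and Sql calls mirror the diffs above):

    use starknet::core::types::Felt;
    use torii_core::executor::Executor;
    use torii_core::sql::Sql;

    // Hypothetical test helper, not part of the patch.
    async fn setup_db(pool: sqlx::SqlitePool) -> anyhow::Result<Sql> {
        // The executor owns the write transaction and runs on its own task.
        let (mut executor, sender) = Executor::new(pool.clone()).await?;
        tokio::spawn(async move { executor.run().await.unwrap() });

        // Sql keeps only the channel sender; every write becomes a QueryMessage.
        let mut db = Sql::new(pool.clone(), Felt::ZERO, sender).await?;

        // Writes are queued on the channel...
        db.set_metadata(&Felt::ZERO, "ipfs://placeholder", 0)?;
        // ...and reach sqlite once execute() sends a QueryType::Execute message.
        db.execute()?;

        Ok(db)
    }
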
#[sqlx::test(migrations = "../migrations")] #[serial] async fn test_entity_subscription(pool: SqlitePool) { - let mut db = Sql::new(pool.clone(), Felt::ZERO).await.unwrap(); + let (mut executor, sender) = Executor::new(pool.clone()).await.unwrap(); + tokio::spawn(async move { + executor.run().await.unwrap(); + }); + let mut db = Sql::new(pool.clone(), Felt::ZERO, sender).await.unwrap(); model_fixtures(&mut db).await; // 0. Preprocess expected entity value @@ -119,7 +124,7 @@ mod tests { ) .await .unwrap(); - db.execute().await.unwrap(); + db.execute().unwrap(); tx.send(()).await.unwrap(); }); @@ -156,7 +161,11 @@ mod tests { #[sqlx::test(migrations = "../migrations")] #[serial] async fn test_entity_subscription_with_id(pool: SqlitePool) { - let mut db = Sql::new(pool.clone(), Felt::ZERO).await.unwrap(); + let (mut executor, sender) = Executor::new(pool.clone()).await.unwrap(); + tokio::spawn(async move { + executor.run().await.unwrap(); + }); + let mut db = Sql::new(pool.clone(), Felt::ZERO, sender).await.unwrap(); model_fixtures(&mut db).await; // 0. Preprocess expected entity value @@ -237,7 +246,7 @@ mod tests { ) .await .unwrap(); - db.execute().await.unwrap(); + db.execute().unwrap(); tx.send(()).await.unwrap(); }); @@ -271,7 +280,11 @@ mod tests { #[sqlx::test(migrations = "../migrations")] #[serial] async fn test_model_subscription(pool: SqlitePool) { - let mut db = Sql::new(pool.clone(), Felt::ZERO).await.unwrap(); + let (mut executor, sender) = Executor::new(pool.clone()).await.unwrap(); + tokio::spawn(async move { + executor.run().await.unwrap(); + }); + let mut db = Sql::new(pool.clone(), Felt::ZERO, sender).await.unwrap(); // 0. Preprocess model value let namespace = "types_test".to_string(); let model_name = "Subrecord".to_string(); @@ -309,7 +322,7 @@ mod tests { ) .await .unwrap(); - db.execute().await.unwrap(); + db.execute().unwrap(); // 3. fn publish() is called from state.set_entity() @@ -336,7 +349,11 @@ mod tests { #[sqlx::test(migrations = "../migrations")] #[serial] async fn test_model_subscription_with_id(pool: SqlitePool) { - let mut db = Sql::new(pool.clone(), Felt::ZERO).await.unwrap(); + let (mut executor, sender) = Executor::new(pool.clone()).await.unwrap(); + tokio::spawn(async move { + executor.run().await.unwrap(); + }); + let mut db = Sql::new(pool.clone(), Felt::ZERO, sender).await.unwrap(); // 0. Preprocess model value let namespace = "types_test".to_string(); let model_name = "Subrecord".to_string(); @@ -373,7 +390,7 @@ mod tests { ) .await .unwrap(); - db.execute().await.unwrap(); + db.execute().unwrap(); // 3. 
fn publish() is called from state.set_entity() tx.send(()).await.unwrap(); @@ -402,7 +419,11 @@ mod tests { #[sqlx::test(migrations = "../migrations")] #[serial] async fn test_event_emitted(pool: SqlitePool) { - let mut db = Sql::new(pool.clone(), Felt::ZERO).await.unwrap(); + let (mut executor, sender) = Executor::new(pool.clone()).await.unwrap(); + tokio::spawn(async move { + executor.run().await.unwrap(); + }); + let mut db = Sql::new(pool.clone(), Felt::ZERO, sender).await.unwrap(); let block_timestamp: u64 = 1710754478_u64; let (tx, mut rx) = mpsc::channel(7); tokio::spawn(async move { @@ -423,8 +444,8 @@ mod tests { }, Felt::ZERO, block_timestamp, - ); - db.execute().await.unwrap(); + ).unwrap(); + db.execute().unwrap(); tx.send(()).await.unwrap(); }); diff --git a/crates/torii/grpc/src/server/tests/entities_test.rs b/crates/torii/grpc/src/server/tests/entities_test.rs index 0b04574a03..0dd9578e76 100644 --- a/crates/torii/grpc/src/server/tests/entities_test.rs +++ b/crates/torii/grpc/src/server/tests/entities_test.rs @@ -20,6 +20,7 @@ use starknet::providers::{JsonRpcClient, Provider}; use starknet_crypto::poseidon_hash_many; use tokio::sync::broadcast; use torii_core::engine::{Engine, EngineConfig, Processors}; +use torii_core::executor::Executor; use torii_core::processors::generate_event_processors_map; use torii_core::processors::register_model::RegisterModelProcessor; use torii_core::processors::store_set_record::StoreSetRecordProcessor; @@ -92,7 +93,11 @@ async fn test_entities_queries(sequencer: &RunnerCtx) { TransactionWaiter::new(tx.transaction_hash, &provider).await.unwrap(); - let db = Sql::new(pool.clone(), strat.world_address).await.unwrap(); + let (mut executor, sender) = Executor::new(pool.clone()).await.unwrap(); + tokio::spawn(async move { + executor.run().await.unwrap(); + }); + let db = Sql::new(pool.clone(), strat.world_address, sender).await.unwrap(); let (shutdown_tx, _) = broadcast::channel(1); let mut engine = Engine::new( diff --git a/crates/torii/libp2p/src/tests.rs b/crates/torii/libp2p/src/tests.rs index 7ef1472068..ca69c9f9f6 100644 --- a/crates/torii/libp2p/src/tests.rs +++ b/crates/torii/libp2p/src/tests.rs @@ -536,6 +536,7 @@ mod test { use starknet_crypto::Felt; use tokio::select; use tokio::time::sleep; + use torii_core::executor::Executor; use torii_core::sql::Sql; use crate::server::Relay; @@ -559,7 +560,11 @@ mod test { let account = sequencer.account_data(0); - let mut db = Sql::new(pool.clone(), Felt::ZERO).await?; + let (mut executor, sender) = Executor::new(pool.clone()).await.unwrap(); + tokio::spawn(async move { + executor.run().await.unwrap(); + }); + let mut db = Sql::new(pool.clone(), Felt::ZERO, sender).await.unwrap(); // Register the model of our Message db.register_model( @@ -588,7 +593,7 @@ mod test { ) .await .unwrap(); - db.execute().await.unwrap(); + db.execute().unwrap(); // Initialize the relay server let mut relay_server = Relay::new(db, provider, 9900, 9901, 9902, None, None)?; From f9a136f90011085fa8ff2d1286c2ef66b70ca58b Mon Sep 17 00:00:00 2001 From: Nasr Date: Wed, 25 Sep 2024 12:01:07 -0400 Subject: [PATCH 06/51] fmt --- crates/torii/core/src/engine.rs | 22 +++---- crates/torii/core/src/lib.rs | 2 +- .../core/src/processors/metadata_update.rs | 3 +- crates/torii/core/src/sql.rs | 65 +++++++++++-------- .../graphql/src/tests/subscription_test.rs | 3 +- 5 files changed, 50 insertions(+), 45 deletions(-) diff --git a/crates/torii/core/src/engine.rs b/crates/torii/core/src/engine.rs index 3a3b327f04..910ad9ee0a 100644 --- 
a/crates/torii/core/src/engine.rs +++ b/crates/torii/core/src/engine.rs @@ -180,21 +180,15 @@ impl Engine

{ } match self.process(fetch_result).await { - Ok(()) => { - let _ = self.db.executor.send(QueryMessage { - statement: "".to_string(), - arguments: vec![], - query_type: QueryType::Execute, - }); + Ok(()) => self.db.execute()?, + Err(e) => { + error!(target: LOG_TARGET, error = %e, "Processing fetched data."); + erroring_out = true; + sleep(backoff_delay).await; + if backoff_delay < max_backoff_delay { + backoff_delay *= 2; + } } - Err(e) => { - error!(target: LOG_TARGET, error = %e, "Processing fetched data."); - erroring_out = true; - sleep(backoff_delay).await; - if backoff_delay < max_backoff_delay { - backoff_delay *= 2; - } - } } debug!(target: LOG_TARGET, duration = ?instant.elapsed(), "Processed fetched data."); } diff --git a/crates/torii/core/src/lib.rs b/crates/torii/core/src/lib.rs index d47e9bf71f..ef265847fd 100644 --- a/crates/torii/core/src/lib.rs +++ b/crates/torii/core/src/lib.rs @@ -1,9 +1,9 @@ pub mod cache; pub mod engine; pub mod error; +pub mod executor; pub mod model; pub mod processors; -pub mod executor; pub mod simple_broker; pub mod sql; pub mod types; diff --git a/crates/torii/core/src/processors/metadata_update.rs b/crates/torii/core/src/processors/metadata_update.rs index 6a02995b6c..4b17858d89 100644 --- a/crates/torii/core/src/processors/metadata_update.rs +++ b/crates/torii/core/src/processors/metadata_update.rs @@ -83,8 +83,7 @@ where async fn try_retrieve(mut db: Sql, resource: Felt, uri_str: String) { match metadata(uri_str.clone()).await { Ok((metadata, icon_img, cover_img)) => { - db.update_metadata(&resource, &uri_str, &metadata, &icon_img, &cover_img) - .unwrap(); + db.update_metadata(&resource, &uri_str, &metadata, &icon_img, &cover_img).unwrap(); info!( target: LOG_TARGET, resource = %format!("{:#x}", resource), diff --git a/crates/torii/core/src/sql.rs b/crates/torii/core/src/sql.rs index 06feee2234..4b91991ec7 100644 --- a/crates/torii/core/src/sql.rs +++ b/crates/torii/core/src/sql.rs @@ -43,8 +43,9 @@ impl Sql { executor: UnboundedSender, ) -> Result { executor.send(QueryMessage { - statement: "INSERT OR IGNORE INTO contracts (id, contract_address, contract_type) VALUES (?, ?, \ - ?)".to_string(), + statement: "INSERT OR IGNORE INTO contracts (id, contract_address, contract_type) \ + VALUES (?, ?, ?)" + .to_string(), arguments: vec![ Argument::FieldElement(world_address), Argument::FieldElement(world_address), @@ -54,8 +55,9 @@ impl Sql { })?; executor.send(QueryMessage { - statement: "INSERT OR IGNORE INTO contracts (id, contract_address, contract_type) VALUES (?, ?, \ - ?)".to_string(), + statement: "INSERT OR IGNORE INTO contracts (id, contract_address, contract_type) \ + VALUES (?, ?, ?)" + .to_string(), arguments: vec![ Argument::FieldElement(world_address), Argument::FieldElement(world_address), @@ -107,7 +109,10 @@ impl Sql { Ok(()) } - pub fn set_last_pending_block_world_tx(&mut self, last_pending_block_world_tx: Option) -> Result<()> { + pub fn set_last_pending_block_world_tx( + &mut self, + last_pending_block_world_tx: Option, + ) -> Result<()> { let last_pending_block_world_tx = if let Some(f) = last_pending_block_world_tx { Argument::String(format!("{:#x}", f)) } else { @@ -261,9 +266,13 @@ impl Sql { })?; self.executor.send(QueryMessage { - statement: "INSERT INTO entity_model (entity_id, model_id) VALUES (?, ?) 
ON CONFLICT(entity_id, \ - model_id) DO NOTHING".to_string(), - arguments: vec![Argument::String(entity_id.clone()), Argument::String(model_id.clone())], + statement: "INSERT INTO entity_model (entity_id, model_id) VALUES (?, ?) ON \ + CONFLICT(entity_id, model_id) DO NOTHING" + .to_string(), + arguments: vec![ + Argument::String(entity_id.clone()), + Argument::String(model_id.clone()), + ], query_type: QueryType::Other, })?; @@ -318,9 +327,13 @@ impl Sql { query_type: QueryType::EventMessage(entity.clone()), })?; self.executor.send(QueryMessage { - statement: "INSERT INTO event_model (entity_id, model_id) VALUES (?, ?) ON CONFLICT(entity_id, \ - model_id) DO NOTHING".to_string(), - arguments: vec![Argument::String(entity_id.clone()), Argument::String(model_id.clone())], + statement: "INSERT INTO event_model (entity_id, model_id) VALUES (?, ?) ON \ + CONFLICT(entity_id, model_id) DO NOTHING" + .to_string(), + arguments: vec![ + Argument::String(entity_id.clone()), + Argument::String(model_id.clone()), + ], query_type: QueryType::Other, })?; @@ -373,11 +386,10 @@ impl Sql { let executed_at = Argument::String(utc_dt_string_from_timestamp(block_timestamp)); self.executor.send(QueryMessage { - statement: - "INSERT INTO metadata (id, uri, executed_at) VALUES (?, ?, ?) ON CONFLICT(id) DO \ - UPDATE SET id=excluded.id, executed_at=excluded.executed_at, \ - updated_at=CURRENT_TIMESTAMP" - .to_string(), + statement: "INSERT INTO metadata (id, uri, executed_at) VALUES (?, ?, ?) ON \ + CONFLICT(id) DO UPDATE SET id=excluded.id, \ + executed_at=excluded.executed_at, updated_at=CURRENT_TIMESTAMP" + .to_string(), arguments: vec![resource, uri, executed_at], query_type: QueryType::Other, })?; @@ -472,9 +484,10 @@ impl Sql { }; self.executor.send(QueryMessage { - statement: "INSERT OR IGNORE INTO transactions (id, transaction_hash, sender_address, calldata, \ - max_fee, signature, nonce, transaction_type, executed_at) VALUES (?, ?, ?, ?, ?, ?, \ - ?, ?, ?)".to_string(), + statement: "INSERT OR IGNORE INTO transactions (id, transaction_hash, sender_address, \ + calldata, max_fee, signature, nonce, transaction_type, executed_at) \ + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)" + .to_string(), arguments: vec![ id, transaction_hash, @@ -506,8 +519,9 @@ impl Sql { let executed_at = Argument::String(utc_dt_string_from_timestamp(block_timestamp)); self.executor.send(QueryMessage { - statement: "INSERT OR IGNORE INTO events (id, keys, data, transaction_hash, executed_at) VALUES \ - (?, ?, ?, ?, ?) RETURNING *".to_string(), + statement: "INSERT OR IGNORE INTO events (id, keys, data, transaction_hash, \ + executed_at) VALUES (?, ?, ?, ?, ?) 
RETURNING *" + .to_string(), arguments: vec![id, keys, data, hash, executed_at], query_type: QueryType::StoreEvent, })?; @@ -608,7 +622,8 @@ impl Sql { let update_members = |members: &[Member], executor: &mut UnboundedSender, - indexes: &Vec| -> Result<()> { + indexes: &Vec| + -> Result<()> { let table_id = path.join("$"); let mut columns = vec![ "id".to_string(), @@ -719,11 +734,7 @@ impl Sql { Ty::Enum(e) => { if e.options.iter().all( |o| { - if let Ty::Tuple(t) = &o.ty { - t.is_empty() - } else { - false - } + if let Ty::Tuple(t) = &o.ty { t.is_empty() } else { false } }, ) { return Ok(()); diff --git a/crates/torii/graphql/src/tests/subscription_test.rs b/crates/torii/graphql/src/tests/subscription_test.rs index bc8f506bc5..aeab4d5a2e 100644 --- a/crates/torii/graphql/src/tests/subscription_test.rs +++ b/crates/torii/graphql/src/tests/subscription_test.rs @@ -444,7 +444,8 @@ mod tests { }, Felt::ZERO, block_timestamp, - ).unwrap(); + ) + .unwrap(); db.execute().unwrap(); tx.send(()).await.unwrap(); From b883343b18ce9ccc4158b41670f7c737fa5c5ce9 Mon Sep 17 00:00:00 2001 From: Nasr Date: Wed, 25 Sep 2024 12:03:19 -0400 Subject: [PATCH 07/51] executor inside of tokio select --- bin/torii/src/main.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/bin/torii/src/main.rs b/bin/torii/src/main.rs index 12bafbb0a7..45dc0b31cc 100644 --- a/bin/torii/src/main.rs +++ b/bin/torii/src/main.rs @@ -187,10 +187,6 @@ async fn main() -> anyhow::Result<()> { let world = WorldContractReader::new(args.world_address, provider.clone()); let (mut executor, sender) = Executor::new(pool.clone()).await?; - tokio::spawn(async move { - executor.run().await.unwrap(); - }); - let db = Sql::new(pool.clone(), args.world_address, sender.clone()).await?; let processors = Processors { @@ -295,6 +291,7 @@ async fn main() -> anyhow::Result<()> { tokio::select! 
{ res = engine.start() => res?, + _ = executor.run() => {}, _ = proxy_server.start(shutdown_tx.subscribe()) => {}, _ = graphql_server => {}, _ = grpc_server => {}, From 7771fdf94f03fdd936a36029367a17d553dc2225 Mon Sep 17 00:00:00 2001 From: Nasr Date: Wed, 25 Sep 2024 12:19:53 -0400 Subject: [PATCH 08/51] executor graceful exit --- bin/torii/src/main.rs | 2 +- crates/torii/core/src/engine.rs | 1 - crates/torii/core/src/executor.rs | 212 ++++++++++-------- crates/torii/core/src/sql_test.rs | 9 +- .../torii/graphql/src/tests/metadata_test.rs | 7 +- crates/torii/graphql/src/tests/mod.rs | 3 +- .../graphql/src/tests/subscription_test.rs | 17 +- .../grpc/src/server/tests/entities_test.rs | 3 +- crates/torii/libp2p/src/tests.rs | 4 +- .../manifests/dev/deployment/manifest.json | 6 +- .../manifests/dev/deployment/manifest.toml | 6 +- 11 files changed, 155 insertions(+), 115 deletions(-) diff --git a/bin/torii/src/main.rs b/bin/torii/src/main.rs index 45dc0b31cc..d27ad18fda 100644 --- a/bin/torii/src/main.rs +++ b/bin/torii/src/main.rs @@ -186,7 +186,7 @@ async fn main() -> anyhow::Result<()> { // Get world address let world = WorldContractReader::new(args.world_address, provider.clone()); - let (mut executor, sender) = Executor::new(pool.clone()).await?; + let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx.clone()).await?; let db = Sql::new(pool.clone(), args.world_address, sender.clone()).await?; let processors = Processors { diff --git a/crates/torii/core/src/engine.rs b/crates/torii/core/src/engine.rs index 910ad9ee0a..991cd3dea7 100644 --- a/crates/torii/core/src/engine.rs +++ b/crates/torii/core/src/engine.rs @@ -21,7 +21,6 @@ use tokio::task::JoinSet; use tokio::time::{sleep, Instant}; use tracing::{debug, error, info, trace, warn}; -use crate::executor::{QueryMessage, QueryType}; use crate::processors::event_message::EventMessageProcessor; use crate::processors::{BlockProcessor, EventProcessor, TransactionProcessor}; use crate::sql::Sql; diff --git a/crates/torii/core/src/executor.rs b/crates/torii/core/src/executor.rs index bbf63fb0fe..47c58f4165 100644 --- a/crates/torii/core/src/executor.rs +++ b/crates/torii/core/src/executor.rs @@ -3,8 +3,11 @@ use std::mem; use anyhow::{Context, Result}; use dojo_types::schema::{Struct, Ty}; +use sqlx::query::Query; +use sqlx::sqlite::SqliteArguments; use sqlx::{FromRow, Pool, Sqlite, Transaction}; use starknet::core::types::Felt; +use tokio::sync::broadcast::{Receiver, Sender}; use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}; use crate::simple_broker::SimpleBroker; @@ -54,6 +57,7 @@ pub struct Executor<'c> { transaction: Transaction<'c, Sqlite>, publish_queue: VecDeque, rx: UnboundedReceiver, + shutdown_rx: Receiver<()>, } pub struct QueryMessage { @@ -63,112 +67,134 @@ pub struct QueryMessage { } impl<'c> Executor<'c> { - pub async fn new(pool: Pool) -> Result<(Self, UnboundedSender)> { + pub async fn new( + pool: Pool, + shutdown_tx: Sender<()>, + ) -> Result<(Self, UnboundedSender)> { let (tx, rx) = unbounded_channel(); let transaction = pool.begin().await?; let publish_queue = VecDeque::new(); + let shutdown_rx = shutdown_tx.subscribe(); - Ok((Executor { pool, transaction, publish_queue, rx }, tx)) + Ok((Executor { pool, transaction, publish_queue, rx, shutdown_rx }, tx)) } pub async fn run(&mut self) -> Result<()> { - while let Some(msg) = self.rx.recv().await { - let tx = &mut self.transaction; - let QueryMessage { statement, arguments, query_type } = msg; - let mut query = 
sqlx::query(&statement); - - for arg in &arguments { - query = match arg { - Argument::Null => query.bind(None::), - Argument::Int(integer) => query.bind(integer), - Argument::Bool(bool) => query.bind(bool), - Argument::String(string) => query.bind(string), - Argument::FieldElement(felt) => query.bind(format!("{:#x}", felt)), + loop { + tokio::select! { + _ = self.shutdown_rx.recv() => { + break Ok(()); } - } - - match query_type { - QueryType::SetEntity(entity) => { - let row = query.fetch_one(&mut **tx).await.with_context(|| { - format!("Failed to execute query: {:?}, args: {:?}", statement, arguments) - })?; - let mut entity_updated = EntityUpdated::from_row(&row)?; - entity_updated.updated_model = Some(entity); - entity_updated.deleted = false; - let broker_message = BrokerMessage::EntityUpdated(entity_updated); - self.publish_queue.push_back(broker_message); - } - QueryType::DeleteEntity(entity) => { - let delete_model = query.execute(&mut **tx).await.with_context(|| { - format!("Failed to execute query: {:?}, args: {:?}", statement, arguments) - })?; - if delete_model.rows_affected() == 0 { - continue; - } - - let row = sqlx::query( - "UPDATE entities SET updated_at=CURRENT_TIMESTAMP, executed_at=?, \ - event_id=? WHERE id = ? RETURNING *", - ) - .bind(entity.block_timestamp) - .bind(entity.event_id) - .bind(entity.entity_id) - .fetch_one(&mut **tx) - .await?; - let mut entity_updated = EntityUpdated::from_row(&row)?; - entity_updated.updated_model = - Some(Ty::Struct(Struct { name: entity.ty.name(), children: vec![] })); - - let count = sqlx::query_scalar::<_, i64>( - "SELECT count(*) FROM entity_model WHERE entity_id = ?", - ) - .bind(entity_updated.id.clone()) - .fetch_one(&mut **tx) - .await?; - - // Delete entity if all of its models are deleted - if count == 0 { - sqlx::query("DELETE FROM entities WHERE id = ?") - .bind(entity_updated.id.clone()) - .execute(&mut **tx) - .await?; - entity_updated.deleted = true; + Some(msg) = self.rx.recv() => { + let QueryMessage { statement, arguments, query_type } = msg; + let mut query = sqlx::query(&statement); + + for arg in &arguments { + query = match arg { + Argument::Null => query.bind(None::), + Argument::Int(integer) => query.bind(integer), + Argument::Bool(bool) => query.bind(bool), + Argument::String(string) => query.bind(string), + Argument::FieldElement(felt) => query.bind(format!("{:#x}", felt)), + } } - let broker_message = BrokerMessage::EntityUpdated(entity_updated); - self.publish_queue.push_back(broker_message); + self.handle_query_type(query, query_type, &statement, &arguments).await?; } - QueryType::RegisterModel => { - let row = query.fetch_one(&mut **tx).await.with_context(|| { - format!("Failed to execute query: {:?}, args: {:?}", statement, arguments) - })?; - let model_registered = ModelRegistered::from_row(&row)?; - self.publish_queue.push_back(BrokerMessage::ModelRegistered(model_registered)); - } - QueryType::EventMessage(entity) => { - let row = query.fetch_one(&mut **tx).await.with_context(|| { - format!("Failed to execute query: {:?}, args: {:?}", statement, arguments) - })?; - let mut event_message = EventMessageUpdated::from_row(&row)?; - event_message.updated_model = Some(entity); - let broker_message = BrokerMessage::EventMessageUpdated(event_message); - self.publish_queue.push_back(broker_message); - } - QueryType::StoreEvent => { - let row = query.fetch_one(&mut **tx).await.with_context(|| { - format!("Failed to execute query: {:?}, args: {:?}", statement, arguments) - })?; - let event = 
EventEmitted::from_row(&row)?; - self.publish_queue.push_back(BrokerMessage::EventEmitted(event)); - } - QueryType::Execute => { - self.execute().await?; + } + } + } + + async fn handle_query_type<'a>( + &mut self, + query: Query<'a, Sqlite, SqliteArguments<'a>>, + query_type: QueryType, + statement: &str, + arguments: &[Argument], + ) -> Result<()> { + let tx = &mut self.transaction; + + match query_type { + QueryType::SetEntity(entity) => { + let row = query.fetch_one(&mut **tx).await.with_context(|| { + format!("Failed to execute query: {:?}, args: {:?}", statement, arguments) + })?; + let mut entity_updated = EntityUpdated::from_row(&row)?; + entity_updated.updated_model = Some(entity); + entity_updated.deleted = false; + let broker_message = BrokerMessage::EntityUpdated(entity_updated); + self.publish_queue.push_back(broker_message); + } + QueryType::DeleteEntity(entity) => { + let delete_model = query.execute(&mut **tx).await.with_context(|| { + format!("Failed to execute query: {:?}, args: {:?}", statement, arguments) + })?; + if delete_model.rows_affected() == 0 { + return Ok(()); } - QueryType::Other => { - query.execute(&mut **tx).await.with_context(|| { - format!("Failed to execute query: {:?}, args: {:?}", statement, arguments) - })?; + + let row = sqlx::query( + "UPDATE entities SET updated_at=CURRENT_TIMESTAMP, executed_at=?, event_id=? \ + WHERE id = ? RETURNING *", + ) + .bind(entity.block_timestamp) + .bind(entity.event_id) + .bind(entity.entity_id) + .fetch_one(&mut **tx) + .await?; + let mut entity_updated = EntityUpdated::from_row(&row)?; + entity_updated.updated_model = + Some(Ty::Struct(Struct { name: entity.ty.name(), children: vec![] })); + + let count = sqlx::query_scalar::<_, i64>( + "SELECT count(*) FROM entity_model WHERE entity_id = ?", + ) + .bind(entity_updated.id.clone()) + .fetch_one(&mut **tx) + .await?; + + // Delete entity if all of its models are deleted + if count == 0 { + sqlx::query("DELETE FROM entities WHERE id = ?") + .bind(entity_updated.id.clone()) + .execute(&mut **tx) + .await?; + entity_updated.deleted = true; } + + let broker_message = BrokerMessage::EntityUpdated(entity_updated); + self.publish_queue.push_back(broker_message); + } + QueryType::RegisterModel => { + let row = query.fetch_one(&mut **tx).await.with_context(|| { + format!("Failed to execute query: {:?}, args: {:?}", statement, arguments) + })?; + let model_registered = ModelRegistered::from_row(&row)?; + self.publish_queue.push_back(BrokerMessage::ModelRegistered(model_registered)); + } + QueryType::EventMessage(entity) => { + let row = query.fetch_one(&mut **tx).await.with_context(|| { + format!("Failed to execute query: {:?}, args: {:?}", statement, arguments) + })?; + let mut event_message = EventMessageUpdated::from_row(&row)?; + event_message.updated_model = Some(entity); + let broker_message = BrokerMessage::EventMessageUpdated(event_message); + self.publish_queue.push_back(broker_message); + } + QueryType::StoreEvent => { + let row = query.fetch_one(&mut **tx).await.with_context(|| { + format!("Failed to execute query: {:?}, args: {:?}", statement, arguments) + })?; + let event = EventEmitted::from_row(&row)?; + self.publish_queue.push_back(BrokerMessage::EventEmitted(event)); + } + QueryType::Execute => { + self.execute().await?; + } + QueryType::Other => { + query.execute(&mut **tx).await.with_context(|| { + format!("Failed to execute query: {:?}, args: {:?}", statement, arguments) + })?; } } diff --git a/crates/torii/core/src/sql_test.rs 
b/crates/torii/core/src/sql_test.rs index 0e2dceead0..938226801e 100644 --- a/crates/torii/core/src/sql_test.rs +++ b/crates/torii/core/src/sql_test.rs @@ -130,7 +130,8 @@ async fn test_load_from_remote(sequencer: &RunnerCtx) { let world_reader = WorldContractReader::new(strat.world_address, Arc::clone(&provider)); - let (mut executor, sender) = Executor::new(pool.clone()).await.unwrap(); + let (shutdown_tx, _) = broadcast::channel(1); + let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); @@ -290,7 +291,8 @@ async fn test_load_from_remote_del(sequencer: &RunnerCtx) { let world_reader = WorldContractReader::new(strat.world_address, Arc::clone(&provider)); - let (mut executor, sender) = Executor::new(pool.clone()).await.unwrap(); + let (shutdown_tx, _) = broadcast::channel(1); + let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); @@ -380,7 +382,8 @@ async fn test_update_with_set_record(sequencer: &RunnerCtx) { let world_reader = WorldContractReader::new(strat.world_address, Arc::clone(&provider)); - let (mut executor, sender) = Executor::new(pool.clone()).await.unwrap(); + let (shutdown_tx, _) = broadcast::channel(1); + let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); diff --git a/crates/torii/graphql/src/tests/metadata_test.rs b/crates/torii/graphql/src/tests/metadata_test.rs index ef4a44a240..cba59b81a7 100644 --- a/crates/torii/graphql/src/tests/metadata_test.rs +++ b/crates/torii/graphql/src/tests/metadata_test.rs @@ -4,6 +4,7 @@ mod tests { use dojo_world::metadata::WorldMetadata; use sqlx::SqlitePool; use starknet::core::types::Felt; + use tokio::sync::broadcast; use torii_core::executor::Executor; use torii_core::sql::Sql; @@ -49,7 +50,8 @@ mod tests { #[sqlx::test(migrations = "../migrations")] async fn test_metadata(pool: SqlitePool) { - let (mut executor, sender) = Executor::new(pool.clone()).await.unwrap(); + let (shutdown_tx, _) = broadcast::channel(1); + let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); @@ -105,7 +107,8 @@ mod tests { #[sqlx::test(migrations = "../migrations")] async fn test_empty_content(pool: SqlitePool) { - let (mut executor, sender) = Executor::new(pool.clone()).await.unwrap(); + let (shutdown_tx, _) = broadcast::channel(1); + let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); diff --git a/crates/torii/graphql/src/tests/mod.rs b/crates/torii/graphql/src/tests/mod.rs index efdcdee8b4..47d377a169 100644 --- a/crates/torii/graphql/src/tests/mod.rs +++ b/crates/torii/graphql/src/tests/mod.rs @@ -351,7 +351,8 @@ pub async fn spinup_types_test() -> Result { let world = WorldContractReader::new(strat.world_address, Arc::clone(&provider)); - let (mut executor, sender) = Executor::new(pool.clone()).await.unwrap(); + let (shutdown_tx, _) = broadcast::channel(1); + let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); diff --git a/crates/torii/graphql/src/tests/subscription_test.rs b/crates/torii/graphql/src/tests/subscription_test.rs index aeab4d5a2e..40e74811af 100644 --- 
a/crates/torii/graphql/src/tests/subscription_test.rs +++ b/crates/torii/graphql/src/tests/subscription_test.rs @@ -12,7 +12,7 @@ mod tests { use sqlx::SqlitePool; use starknet::core::types::Event; use starknet_crypto::{poseidon_hash_many, Felt}; - use tokio::sync::mpsc; + use tokio::sync::{broadcast, mpsc}; use torii_core::executor::Executor; use torii_core::sql::{felts_sql_string, Sql}; @@ -22,7 +22,8 @@ mod tests { #[sqlx::test(migrations = "../migrations")] #[serial] async fn test_entity_subscription(pool: SqlitePool) { - let (mut executor, sender) = Executor::new(pool.clone()).await.unwrap(); + let (shutdown_tx, _) = broadcast::channel(1); + let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); @@ -161,7 +162,8 @@ mod tests { #[sqlx::test(migrations = "../migrations")] #[serial] async fn test_entity_subscription_with_id(pool: SqlitePool) { - let (mut executor, sender) = Executor::new(pool.clone()).await.unwrap(); + let (shutdown_tx, _) = broadcast::channel(1); + let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); @@ -280,7 +282,8 @@ mod tests { #[sqlx::test(migrations = "../migrations")] #[serial] async fn test_model_subscription(pool: SqlitePool) { - let (mut executor, sender) = Executor::new(pool.clone()).await.unwrap(); + let (shutdown_tx, _) = broadcast::channel(1); + let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); @@ -349,7 +352,8 @@ mod tests { #[sqlx::test(migrations = "../migrations")] #[serial] async fn test_model_subscription_with_id(pool: SqlitePool) { - let (mut executor, sender) = Executor::new(pool.clone()).await.unwrap(); + let (shutdown_tx, _) = broadcast::channel(1); + let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); @@ -419,7 +423,8 @@ mod tests { #[sqlx::test(migrations = "../migrations")] #[serial] async fn test_event_emitted(pool: SqlitePool) { - let (mut executor, sender) = Executor::new(pool.clone()).await.unwrap(); + let (shutdown_tx, _) = broadcast::channel(1); + let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); diff --git a/crates/torii/grpc/src/server/tests/entities_test.rs b/crates/torii/grpc/src/server/tests/entities_test.rs index 0dd9578e76..ee85900cd1 100644 --- a/crates/torii/grpc/src/server/tests/entities_test.rs +++ b/crates/torii/grpc/src/server/tests/entities_test.rs @@ -93,7 +93,8 @@ async fn test_entities_queries(sequencer: &RunnerCtx) { TransactionWaiter::new(tx.transaction_hash, &provider).await.unwrap(); - let (mut executor, sender) = Executor::new(pool.clone()).await.unwrap(); + let (shutdown_tx, _) = broadcast::channel(1); + let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); diff --git a/crates/torii/libp2p/src/tests.rs b/crates/torii/libp2p/src/tests.rs index ca69c9f9f6..08b52f8ca6 100644 --- a/crates/torii/libp2p/src/tests.rs +++ b/crates/torii/libp2p/src/tests.rs @@ -535,6 +535,7 @@ mod test { use starknet::signers::SigningKey; use starknet_crypto::Felt; use tokio::select; + use tokio::sync::broadcast; use tokio::time::sleep; use torii_core::executor::Executor; use 
torii_core::sql::Sql; @@ -560,7 +561,8 @@ mod test { let account = sequencer.account_data(0); - let (mut executor, sender) = Executor::new(pool.clone()).await.unwrap(); + let (shutdown_tx, _) = broadcast::channel(1); + let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); diff --git a/examples/spawn-and-move/manifests/dev/deployment/manifest.json b/examples/spawn-and-move/manifests/dev/deployment/manifest.json index 8665507c3f..aaa60f5ae7 100644 --- a/examples/spawn-and-move/manifests/dev/deployment/manifest.json +++ b/examples/spawn-and-move/manifests/dev/deployment/manifest.json @@ -1234,9 +1234,9 @@ ] } ], - "address": "0x15b584f1132fe386a0aa7a447e7073233a545ac4b84b361ed797edaa8f0f14", - "transaction_hash": "0x715b5d1bde9766b6db3f8b903a0626343c553a91e199f96d721644d121e676b", - "block_number": 3, + "address": "0x5fedbace16902d9ca4cdc1522f9fe156cd8c69a5d25e1436ee4b7b9933ad997", + "transaction_hash": "0x4c8e0d28e32c21f29f33ff68e245b65fcc91763abf53f284cce8c2274ff6115", + "block_number": 6, "seed": "dojo_examples", "metadata": { "profile_name": "dev", diff --git a/examples/spawn-and-move/manifests/dev/deployment/manifest.toml b/examples/spawn-and-move/manifests/dev/deployment/manifest.toml index 0bc300f306..871e3a9faa 100644 --- a/examples/spawn-and-move/manifests/dev/deployment/manifest.toml +++ b/examples/spawn-and-move/manifests/dev/deployment/manifest.toml @@ -3,9 +3,9 @@ kind = "WorldContract" class_hash = "0x5c4271c8cd454ceb8049d2b0724c99d12c2ef8077fc6ad325b18978f614aab0" original_class_hash = "0x5c4271c8cd454ceb8049d2b0724c99d12c2ef8077fc6ad325b18978f614aab0" abi = "manifests/dev/deployment/abis/dojo-world.json" -address = "0x15b584f1132fe386a0aa7a447e7073233a545ac4b84b361ed797edaa8f0f14" -transaction_hash = "0x715b5d1bde9766b6db3f8b903a0626343c553a91e199f96d721644d121e676b" -block_number = 3 +address = "0x5fedbace16902d9ca4cdc1522f9fe156cd8c69a5d25e1436ee4b7b9933ad997" +transaction_hash = "0x4c8e0d28e32c21f29f33ff68e245b65fcc91763abf53f284cce8c2274ff6115" +block_number = 6 seed = "dojo_examples" manifest_name = "dojo-world" From 60c9069183f01a4704bbde55626a90dd7555c64d Mon Sep 17 00:00:00 2001 From: Nasr Date: Wed, 25 Sep 2024 12:21:30 -0400 Subject: [PATCH 09/51] priv execute --- crates/torii/core/src/executor.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/torii/core/src/executor.rs b/crates/torii/core/src/executor.rs index 47c58f4165..889069c0f8 100644 --- a/crates/torii/core/src/executor.rs +++ b/crates/torii/core/src/executor.rs @@ -201,7 +201,7 @@ impl<'c> Executor<'c> { Ok(()) } - pub async fn execute(&mut self) -> Result<()> { + async fn execute(&mut self) -> Result<()> { let transaction = mem::replace(&mut self.transaction, self.pool.begin().await?); transaction.commit().await?; From cd52f0f067c6a7f5a395c27481e0895b8c352e9c Mon Sep 17 00:00:00 2001 From: Nasr Date: Wed, 25 Sep 2024 12:31:19 -0400 Subject: [PATCH 10/51] contracts insertion shouldnt go through executor --- crates/torii/core/src/sql.rs | 35 ++++++----------------------------- 1 file changed, 6 insertions(+), 29 deletions(-) diff --git a/crates/torii/core/src/sql.rs b/crates/torii/core/src/sql.rs index 4b91991ec7..ae9534979d 100644 --- a/crates/torii/core/src/sql.rs +++ b/crates/torii/core/src/sql.rs @@ -42,35 +42,12 @@ impl Sql { world_address: Felt, executor: UnboundedSender, ) -> Result { - executor.send(QueryMessage { - statement: "INSERT OR IGNORE INTO contracts (id, contract_address, 
contract_type) \ - VALUES (?, ?, ?)" - .to_string(), - arguments: vec![ - Argument::FieldElement(world_address), - Argument::FieldElement(world_address), - Argument::String(WORLD_CONTRACT_TYPE.to_string()), - ], - query_type: QueryType::Other, - })?; - - executor.send(QueryMessage { - statement: "INSERT OR IGNORE INTO contracts (id, contract_address, contract_type) \ - VALUES (?, ?, ?)" - .to_string(), - arguments: vec![ - Argument::FieldElement(world_address), - Argument::FieldElement(world_address), - Argument::String(WORLD_CONTRACT_TYPE.to_string()), - ], - query_type: QueryType::Other, - })?; - - executor.send(QueryMessage { - statement: "".to_string(), - arguments: vec![], - query_type: QueryType::Execute, - })?; + sqlx::query("INSERT OR IGNORE INTO contracts (id, contract_address, contract_type) VALUES (?, ?, ?)") + .bind(format!("{:#x}", world_address)) + .bind(format!("{:#x}", world_address)) + .bind(WORLD_CONTRACT_TYPE) + .execute(&pool) + .await?; Ok(Self { pool: pool.clone(), From 045eed001f9741fa7fd3eef0277f942150769e3c Mon Sep 17 00:00:00 2001 From: Nasr Date: Wed, 25 Sep 2024 12:41:45 -0400 Subject: [PATCH 11/51] clean code --- crates/torii/core/src/executor.rs | 14 ++ crates/torii/core/src/sql.rs | 229 ++++++++++++------------------ 2 files changed, 103 insertions(+), 140 deletions(-) diff --git a/crates/torii/core/src/executor.rs b/crates/torii/core/src/executor.rs index 889069c0f8..76237f0a73 100644 --- a/crates/torii/core/src/executor.rs +++ b/crates/torii/core/src/executor.rs @@ -66,6 +66,20 @@ pub struct QueryMessage { pub query_type: QueryType, } +impl QueryMessage { + pub fn new(statement: String, arguments: Vec, query_type: QueryType) -> Self { + Self { statement, arguments, query_type } + } + + pub fn other(statement: String, arguments: Vec) -> Self { + Self { statement, arguments, query_type: QueryType::Other } + } + + pub fn execute() -> Self { + Self { statement: "".to_string(), arguments: vec![], query_type: QueryType::Execute } + } +} + impl<'c> Executor<'c> { pub async fn new( pool: Pool, diff --git a/crates/torii/core/src/sql.rs b/crates/torii/core/src/sql.rs index ae9534979d..a8d179a1a4 100644 --- a/crates/torii/core/src/sql.rs +++ b/crates/torii/core/src/sql.rs @@ -42,12 +42,15 @@ impl Sql { world_address: Felt, executor: UnboundedSender, ) -> Result { - sqlx::query("INSERT OR IGNORE INTO contracts (id, contract_address, contract_type) VALUES (?, ?, ?)") - .bind(format!("{:#x}", world_address)) - .bind(format!("{:#x}", world_address)) - .bind(WORLD_CONTRACT_TYPE) - .execute(&pool) - .await?; + sqlx::query( + "INSERT OR IGNORE INTO contracts (id, contract_address, contract_type) VALUES (?, ?, \ + ?)", + ) + .bind(format!("{:#x}", world_address)) + .bind(format!("{:#x}", world_address)) + .bind(WORLD_CONTRACT_TYPE) + .execute(&pool) + .await?; Ok(Self { pool: pool.clone(), @@ -77,11 +80,10 @@ impl Sql { pub fn set_head(&mut self, head: u64) -> Result<()> { let head = Argument::Int(head.try_into().expect("doesn't fit in u64")); let id = Argument::FieldElement(self.world_address); - self.executor.send(QueryMessage { - statement: "UPDATE contracts SET head = ? WHERE id = ?".to_string(), - arguments: vec![head, id], - query_type: QueryType::Other, - })?; + self.executor.send(QueryMessage::other( + "UPDATE contracts SET head = ? 
WHERE id = ?".to_string(), + vec![head, id], + ))?; Ok(()) } @@ -98,12 +100,10 @@ impl Sql { let id = Argument::FieldElement(self.world_address); - self.executor.send(QueryMessage { - statement: "UPDATE contracts SET last_pending_block_world_tx = ? WHERE id = ?" - .to_string(), - arguments: vec![last_pending_block_world_tx, id], - query_type: QueryType::Other, - })?; + self.executor.send(QueryMessage::other( + "UPDATE contracts SET last_pending_block_world_tx = ? WHERE id = ?".to_string(), + vec![last_pending_block_world_tx, id], + ))?; Ok(()) } @@ -116,11 +116,10 @@ impl Sql { }; let id = Argument::FieldElement(self.world_address); - self.executor.send(QueryMessage { - statement: "UPDATE contracts SET last_pending_block_tx = ? WHERE id = ?".to_string(), - arguments: vec![last_pending_block_tx, id], - query_type: QueryType::Other, - })?; + self.executor.send(QueryMessage::other( + "UPDATE contracts SET last_pending_block_tx = ? WHERE id = ?".to_string(), + vec![last_pending_block_tx, id], + ))?; Ok(()) } @@ -158,11 +157,11 @@ impl Sql { Argument::Int(unpacked_size as i64), Argument::String(utc_dt_string_from_timestamp(block_timestamp)), ]; - self.executor.send(QueryMessage { - statement: insert_models.to_string(), + self.executor.send(QueryMessage::new( + insert_models.to_string(), arguments, - query_type: QueryType::RegisterModel, - })?; + QueryType::RegisterModel, + ))?; let mut model_idx = 0_i64; self.build_register_queries_recursive( @@ -236,22 +235,18 @@ impl Sql { arguments.push(Argument::String(keys.to_string())); } - self.executor.send(QueryMessage { - statement: insert_entities.to_string(), + self.executor.send(QueryMessage::new( + insert_entities.to_string(), arguments, - query_type: QueryType::SetEntity(entity.clone()), - })?; + QueryType::SetEntity(entity.clone()), + ))?; - self.executor.send(QueryMessage { - statement: "INSERT INTO entity_model (entity_id, model_id) VALUES (?, ?) ON \ - CONFLICT(entity_id, model_id) DO NOTHING" + self.executor.send(QueryMessage::other( + "INSERT INTO entity_model (entity_id, model_id) VALUES (?, ?) ON CONFLICT(entity_id, \ + model_id) DO NOTHING" .to_string(), - arguments: vec![ - Argument::String(entity_id.clone()), - Argument::String(model_id.clone()), - ], - query_type: QueryType::Other, - })?; + vec![Argument::String(entity_id.clone()), Argument::String(model_id.clone())], + ))?; let path = vec![namespaced_name]; self.build_set_entity_queries_recursive( @@ -293,26 +288,22 @@ impl Sql { VALUES (?, ?, ?, ?) ON CONFLICT(id) DO UPDATE SET \ updated_at=CURRENT_TIMESTAMP, executed_at=EXCLUDED.executed_at, \ event_id=EXCLUDED.event_id RETURNING *"; - self.executor.send(QueryMessage { - statement: insert_entities.to_string(), - arguments: vec![ + self.executor.send(QueryMessage::new( + insert_entities.to_string(), + vec![ Argument::String(entity_id.clone()), Argument::String(keys_str), Argument::String(event_id.to_string()), Argument::String(utc_dt_string_from_timestamp(block_timestamp)), ], - query_type: QueryType::EventMessage(entity.clone()), - })?; - self.executor.send(QueryMessage { - statement: "INSERT INTO event_model (entity_id, model_id) VALUES (?, ?) ON \ - CONFLICT(entity_id, model_id) DO NOTHING" + QueryType::EventMessage(entity.clone()), + ))?; + self.executor.send(QueryMessage::other( + "INSERT INTO event_model (entity_id, model_id) VALUES (?, ?) 
ON CONFLICT(entity_id, \ + model_id) DO NOTHING" .to_string(), - arguments: vec![ - Argument::String(entity_id.clone()), - Argument::String(model_id.clone()), - ], - query_type: QueryType::Other, - })?; + vec![Argument::String(entity_id.clone()), Argument::String(model_id.clone())], + ))?; let path = vec![namespaced_name]; self.build_set_entity_queries_recursive( @@ -340,19 +331,16 @@ impl Sql { // delete entity models data self.build_delete_entity_queries_recursive(path, &entity_id, &entity)?; - self.executor.send(QueryMessage { - statement: "DELETE FROM entity_model WHERE entity_id = ? AND model_id = ?".to_string(), - arguments: vec![ - Argument::String(entity_id.clone()), - Argument::String(format!("{:#x}", model_id)), - ], - query_type: QueryType::DeleteEntity(DeleteEntityQuery { + self.executor.send(QueryMessage::new( + "DELETE FROM entity_model WHERE entity_id = ? AND model_id = ?".to_string(), + vec![Argument::String(entity_id.clone()), Argument::String(format!("{:#x}", model_id))], + QueryType::DeleteEntity(DeleteEntityQuery { entity_id: entity_id.clone(), event_id: event_id.to_string(), block_timestamp: utc_dt_string_from_timestamp(block_timestamp), ty: entity.clone(), }), - })?; + ))?; Ok(()) } @@ -362,14 +350,13 @@ impl Sql { let uri = Argument::String(uri.to_string()); let executed_at = Argument::String(utc_dt_string_from_timestamp(block_timestamp)); - self.executor.send(QueryMessage { - statement: "INSERT INTO metadata (id, uri, executed_at) VALUES (?, ?, ?) ON \ - CONFLICT(id) DO UPDATE SET id=excluded.id, \ - executed_at=excluded.executed_at, updated_at=CURRENT_TIMESTAMP" + self.executor.send(QueryMessage::other( + "INSERT INTO metadata (id, uri, executed_at) VALUES (?, ?, ?) ON CONFLICT(id) DO \ + UPDATE SET id=excluded.id, executed_at=excluded.executed_at, \ + updated_at=CURRENT_TIMESTAMP" .to_string(), - arguments: vec![resource, uri, executed_at], - query_type: QueryType::Other, - })?; + vec![resource, uri, executed_at], + ))?; Ok(()) } @@ -400,7 +387,7 @@ impl Sql { let statement = format!("UPDATE metadata SET {} WHERE id = ?", update.join(",")); arguments.push(Argument::FieldElement(*resource)); - self.executor.send(QueryMessage { statement, arguments, query_type: QueryType::Other })?; + self.executor.send(QueryMessage::other(statement, arguments))?; Ok(()) } @@ -460,12 +447,12 @@ impl Sql { _ => return Ok(()), }; - self.executor.send(QueryMessage { - statement: "INSERT OR IGNORE INTO transactions (id, transaction_hash, sender_address, \ - calldata, max_fee, signature, nonce, transaction_type, executed_at) \ - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)" + self.executor.send(QueryMessage::other( + "INSERT OR IGNORE INTO transactions (id, transaction_hash, sender_address, calldata, \ + max_fee, signature, nonce, transaction_type, executed_at) VALUES (?, ?, ?, ?, ?, ?, \ + ?, ?, ?)" .to_string(), - arguments: vec![ + vec![ id, transaction_hash, sender_address, @@ -476,8 +463,7 @@ impl Sql { Argument::String(transaction_type.to_string()), Argument::String(utc_dt_string_from_timestamp(block_timestamp)), ], - query_type: QueryType::Other, - })?; + ))?; Ok(()) } @@ -495,13 +481,13 @@ impl Sql { let hash = Argument::FieldElement(transaction_hash); let executed_at = Argument::String(utc_dt_string_from_timestamp(block_timestamp)); - self.executor.send(QueryMessage { - statement: "INSERT OR IGNORE INTO events (id, keys, data, transaction_hash, \ - executed_at) VALUES (?, ?, ?, ?, ?) 
RETURNING *" + self.executor.send(QueryMessage::new( + "INSERT OR IGNORE INTO events (id, keys, data, transaction_hash, executed_at) VALUES \ + (?, ?, ?, ?, ?) RETURNING *" .to_string(), - arguments: vec![id, keys, data, hash, executed_at], - query_type: QueryType::StoreEvent, - })?; + vec![id, keys, data, hash, executed_at], + QueryType::StoreEvent, + ))?; Ok(()) } @@ -686,7 +672,7 @@ impl Sql { ) }; - executor.send(QueryMessage { statement, arguments, query_type: QueryType::Other })?; + executor.send(QueryMessage::other(statement, arguments))?; Ok(()) }; @@ -786,11 +772,7 @@ impl Sql { let mut arguments = vec![Argument::String(entity_id.to_string())]; arguments.extend(indexes.iter().map(|idx| Argument::Int(*idx))); - self.executor.send(QueryMessage { - statement: query, - arguments, - query_type: QueryType::Other, - })?; + self.executor.send(QueryMessage::other(query, arguments))?; // insert the new array elements for (idx, member) in array.iter().enumerate() { @@ -831,11 +813,10 @@ impl Sql { Ty::Struct(s) => { let table_id = path.join("$"); let statement = format!("DELETE FROM [{table_id}] WHERE entity_id = ?"); - self.executor.send(QueryMessage { + self.executor.send(QueryMessage::other( statement, - arguments: vec![Argument::String(entity_id.to_string())], - query_type: QueryType::Other, - })?; + vec![Argument::String(entity_id.to_string())], + ))?; for member in s.children.iter() { let mut path_clone = path.clone(); path_clone.push(member.name.clone()); @@ -852,11 +833,10 @@ impl Sql { let table_id = path.join("$"); let statement = format!("DELETE FROM [{table_id}] WHERE entity_id = ?"); - self.executor.send(QueryMessage { + self.executor.send(QueryMessage::other( statement, - arguments: vec![Argument::String(entity_id.to_string())], - query_type: QueryType::Other, - })?; + vec![Argument::String(entity_id.to_string())], + ))?; for child in e.options.iter() { if let Ty::Tuple(t) = &child.ty { @@ -873,11 +853,10 @@ impl Sql { Ty::Array(array) => { let table_id = path.join("$"); let statement = format!("DELETE FROM [{table_id}] WHERE entity_id = ?"); - self.executor.send(QueryMessage { + self.executor.send(QueryMessage::other( statement, - arguments: vec![Argument::String(entity_id.to_string())], - query_type: QueryType::Other, - })?; + vec![Argument::String(entity_id.to_string())], + ))?; for member in array.iter() { let mut path_clone = path.clone(); @@ -888,11 +867,10 @@ impl Sql { Ty::Tuple(t) => { let table_id = path.join("$"); let statement = format!("DELETE FROM [{table_id}] WHERE entity_id = ?"); - self.executor.send(QueryMessage { + self.executor.send(QueryMessage::other( statement, - arguments: vec![Argument::String(entity_id.to_string())], - query_type: QueryType::Other, - })?; + vec![Argument::String(entity_id.to_string())], + ))?; for (idx, member) in t.iter().enumerate() { let mut path_clone = path.clone(); @@ -1008,11 +986,7 @@ impl Sql { Argument::String(utc_dt_string_from_timestamp(block_timestamp)), ]; - self.executor.send(QueryMessage { - statement: statement.to_string(), - arguments, - query_type: QueryType::Other, - })?; + self.executor.send(QueryMessage::other(statement.to_string(), arguments))?; } } Ty::Tuple(tuple) => { @@ -1040,11 +1014,7 @@ impl Sql { Argument::String(utc_dt_string_from_timestamp(block_timestamp)), ]; - self.executor.send(QueryMessage { - statement: statement.to_string(), - arguments, - query_type: QueryType::Other, - })?; + self.executor.send(QueryMessage::other(statement.to_string(), arguments))?; } } Ty::Array(array) => { @@ -1069,11 
+1039,7 @@ impl Sql { Argument::String(utc_dt_string_from_timestamp(block_timestamp)), ]; - self.executor.send(QueryMessage { - statement: statement.to_string(), - arguments, - query_type: QueryType::Other, - })?; + self.executor.send(QueryMessage::other(statement.to_string(), arguments))?; } Ty::Enum(e) => { for (idx, child) in e @@ -1112,11 +1078,7 @@ impl Sql { Argument::String(utc_dt_string_from_timestamp(block_timestamp)), ]; - self.executor.send(QueryMessage { - statement: statement.to_string(), - arguments, - query_type: QueryType::Other, - })?; + self.executor.send(QueryMessage::other(statement.to_string(), arguments))?; } } _ => {} @@ -1155,30 +1117,17 @@ impl Sql { create_table_query .push_str("FOREIGN KEY (event_message_id) REFERENCES event_messages(id));"); - self.executor.send(QueryMessage { - statement: create_table_query, - arguments: vec![], - query_type: QueryType::Other, - })?; + self.executor.send(QueryMessage::other(create_table_query, vec![]))?; for s in indices.iter() { - self.executor.send(QueryMessage { - statement: s.to_string(), - arguments: vec![], - query_type: QueryType::Other, - })?; + self.executor.send(QueryMessage::other(s.to_string(), vec![]))?; } Ok(()) } pub fn execute(&self) -> Result<()> { - self.executor.send(QueryMessage { - statement: "".to_string(), - arguments: vec![], - query_type: QueryType::Execute, - })?; - + self.executor.send(QueryMessage::execute())?; Ok(()) } } From 045e4ae679813e4d7a75560e521689e545615e57 Mon Sep 17 00:00:00 2001 From: Nasr Date: Wed, 25 Sep 2024 12:45:04 -0400 Subject: [PATCH 12/51] exec --- crates/torii/core/src/sql_test.rs | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/crates/torii/core/src/sql_test.rs b/crates/torii/core/src/sql_test.rs index 938226801e..dd98a7b916 100644 --- a/crates/torii/core/src/sql_test.rs +++ b/crates/torii/core/src/sql_test.rs @@ -20,7 +20,7 @@ use starknet_crypto::poseidon_hash_many; use tokio::sync::broadcast; use crate::engine::{Engine, EngineConfig, Processors}; -use crate::executor::{Executor, QueryMessage, QueryType}; +use crate::executor::{Executor, QueryMessage}; use crate::processors::generate_event_processors_map; use crate::processors::register_model::RegisterModelProcessor; use crate::processors::store_del_record::StoreDelRecordProcessor; @@ -61,11 +61,7 @@ where let data = engine.fetch_range(0, to, None).await.unwrap(); engine.process_range(data).await.unwrap(); - db.executor.send(QueryMessage { - statement: "".to_string(), - arguments: vec![], - query_type: QueryType::Execute, - })?; + db.executor.send(QueryMessage::execute())?; Ok(engine) } From b7acef5afc579df0942f9fb9701d09144fb36aed Mon Sep 17 00:00:00 2001 From: Nasr Date: Wed, 25 Sep 2024 13:08:35 -0400 Subject: [PATCH 13/51] fix: tests --- crates/torii/core/src/engine.rs | 10 +++--- crates/torii/core/src/executor.rs | 2 ++ crates/torii/core/src/sql_test.rs | 14 ++++++++ crates/torii/libp2p/src/tests.rs | 57 ------------------------------- 4 files changed, 21 insertions(+), 62 deletions(-) diff --git a/crates/torii/core/src/engine.rs b/crates/torii/core/src/engine.rs index 991cd3dea7..40cb52e86c 100644 --- a/crates/torii/core/src/engine.rs +++ b/crates/torii/core/src/engine.rs @@ -7,6 +7,7 @@ use std::time::Duration; use anyhow::Result; use bitflags::bitflags; use dojo_world::contracts::world::WorldContractReader; +use futures_util::future::try_join_all; use hashlink::LinkedHashMap; use starknet::core::types::{ BlockId, BlockTag, EmittedEvent, Event, EventFilter, Felt, 
MaybePendingBlockWithReceipts, @@ -17,7 +18,6 @@ use starknet::providers::Provider; use tokio::sync::broadcast::Sender; use tokio::sync::mpsc::Sender as BoundedSender; use tokio::sync::Semaphore; -use tokio::task::JoinSet; use tokio::time::{sleep, Instant}; use tracing::{debug, error, info, trace, warn}; @@ -500,14 +500,14 @@ impl Engine
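The hunk below replaces the JoinSet-draining loop with a Vec of JoinHandles awaited through futures_util's try_join_all behind the existing semaphore, so a task that panics or is cancelled now surfaces as an error instead of being silently discarded. In isolation the pattern looks roughly like this (a minimal self-contained sketch, not the engine code; run_bounded, task_count and the task body are placeholders):

    use std::sync::Arc;

    use anyhow::Result;
    use futures_util::future::try_join_all;
    use tokio::sync::Semaphore;

    // Spawn a bounded number of concurrent tasks and fail fast on any JoinError.
    async fn run_bounded(task_count: usize, max_concurrent: usize) -> Result<()> {
        let semaphore = Arc::new(Semaphore::new(max_concurrent));
        let mut handles = Vec::new();

        for task_id in 0..task_count {
            let semaphore = semaphore.clone();
            handles.push(tokio::spawn(async move {
                // Hold a permit for the lifetime of the task to cap concurrency.
                let _permit = semaphore.acquire().await?;
                // ... per-task work goes here ...
                Ok::<_, anyhow::Error>(task_id)
            }));
        }

        // try_join_all propagates the first JoinError (panicked or cancelled task);
        // draining a JoinSet with `while let Some(_) = set.join_next().await {}`
        // dropped those results on the floor.
        let _results = try_join_all(handles).await?;
        Ok(())
    }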

{ let semaphore = Arc::new(Semaphore::new(self.config.max_concurrent_tasks)); // Run all tasks concurrently - let mut set = JoinSet::new(); + let mut handles = Vec::new(); for (task_id, events) in self.tasks.drain() { let db = self.db.clone(); let world = self.world.clone(); let processors = self.processors.clone(); let semaphore = semaphore.clone(); - set.spawn(async move { + handles.push(tokio::spawn(async move { let _permit = semaphore.acquire().await.unwrap(); let mut local_db = db.clone(); for ParallelizedEvent { event_id, event, block_number, block_timestamp } in events { @@ -523,11 +523,11 @@ impl Engine

{ } } Ok::<_, anyhow::Error>(local_db) - }); + })); } // Join all tasks - while let Some(_) = set.join_next().await {} + try_join_all(handles).await?; Ok(()) } diff --git a/crates/torii/core/src/executor.rs b/crates/torii/core/src/executor.rs index 76237f0a73..599f594c1b 100644 --- a/crates/torii/core/src/executor.rs +++ b/crates/torii/core/src/executor.rs @@ -52,6 +52,7 @@ pub enum QueryType { Other, } +#[derive(Debug)] pub struct Executor<'c> { pool: Pool, transaction: Transaction<'c, Sqlite>, @@ -60,6 +61,7 @@ pub struct Executor<'c> { shutdown_rx: Receiver<()>, } +#[derive(Debug, Clone)] pub struct QueryMessage { pub statement: String, pub arguments: Vec, diff --git a/crates/torii/core/src/sql_test.rs b/crates/torii/core/src/sql_test.rs index dd98a7b916..ec5433cb15 100644 --- a/crates/torii/core/src/sql_test.rs +++ b/crates/torii/core/src/sql_test.rs @@ -124,6 +124,19 @@ async fn test_load_from_remote(sequencer: &RunnerCtx) { TransactionWaiter::new(tx.transaction_hash, &provider).await.unwrap(); + // move + let tx = &account + .execute_v1(vec![Call { + to: actions_address, + selector: get_selector_from_name("move").unwrap(), + calldata: vec![Felt::ONE], + }]) + .send() + .await + .unwrap(); + + TransactionWaiter::new(tx.transaction_hash, &provider).await.unwrap(); + let world_reader = WorldContractReader::new(strat.world_address, Arc::clone(&provider)); let (shutdown_tx, _) = broadcast::channel(1); @@ -186,6 +199,7 @@ async fn test_load_from_remote(sequencer: &RunnerCtx) { assert_eq!(unpacked_size, 0); assert_eq!(count_table("entities", &pool).await, 2); + assert_eq!(count_table("event_messages", &pool).await, 1); let (id, keys): (String, String) = sqlx::query_as( format!( diff --git a/crates/torii/libp2p/src/tests.rs b/crates/torii/libp2p/src/tests.rs index 964b656f2f..08b52f8ca6 100644 --- a/crates/torii/libp2p/src/tests.rs +++ b/crates/torii/libp2p/src/tests.rs @@ -12,15 +12,9 @@ mod test { use crypto_bigint::U256; use dojo_types::primitive::Primitive; use dojo_types::schema::{Enum, EnumOption, Member, Struct, Ty}; - use dojo_world::contracts::abi::model::Layout; - use futures::StreamExt; use katana_runner::KatanaRunner; use serde_json::Number; - use sqlx::sqlite::{SqliteConnectOptions, SqlitePoolOptions}; use starknet::core::types::Felt; - use torii_core::simple_broker::SimpleBroker; - use torii_core::sql::Sql; - use torii_core::types::EventMessage; #[cfg(target_arch = "wasm32")] use wasm_bindgen_test::*; @@ -693,57 +687,6 @@ mod test { } } - // Test to verify that setting an entity message in the SQL database - // triggers a publish event on the broker - #[tokio::test] - async fn test_entity_message_trigger_publish() -> Result<(), Box> { - let _ = tracing_subscriber::fmt() - .with_env_filter("torii::relay::client=debug,torii::relay::server=debug") - .try_init(); - - let options = ::from_str("sqlite::memory:") - .unwrap() - .create_if_missing(true); - let pool = SqlitePoolOptions::new().max_connections(5).connect_with(options).await.unwrap(); - sqlx::migrate!("../migrations").run(&pool).await.unwrap(); - - let mut db = Sql::new(pool.clone(), Felt::ZERO).await.unwrap(); - let mut broker = SimpleBroker::::subscribe(); - - let entity = Ty::Struct(Struct { name: "Message".to_string(), children: vec![] }); - db.register_model( - "test_namespace", - entity.clone(), - Layout::Fixed(vec![]), - Felt::ZERO, - Felt::ZERO, - 0, - 0, - 0, - ) - .await?; - - // FIXME: register_model and set_event_message handle the name and namespace of entity type - // differently. 
- let entity = - Ty::Struct(Struct { name: "test_namespace-Message".to_string(), children: vec![] }); - - // Set the event message in the database - db.set_event_message(entity, "some_entity_id", 0).await?; - db.query_queue.execute_all().await?; - - // Check if a message was published to the broker - tokio::select! { - Some(message) = broker.next() => { - println!("Received message: {:?}", message); - Ok(()) - }, - _ = tokio::time::sleep(std::time::Duration::from_secs(5)) => { - Err("Timeout: No message received".into()) - } - } - } - #[cfg(target_arch = "wasm32")] #[wasm_bindgen_test] async fn test_client_connection_wasm() -> Result<(), Box> { From b94ad7aa0149001b27453f9a2d4ffaec344e046c Mon Sep 17 00:00:00 2001 From: Nasr Date: Wed, 25 Sep 2024 14:00:23 -0400 Subject: [PATCH 14/51] oneshot channel for execution result --- crates/torii/core/src/engine.rs | 2 +- crates/torii/core/src/executor.rs | 23 +++++++++++-------- crates/torii/core/src/sql.rs | 8 ++++--- crates/torii/core/src/sql_test.rs | 4 ++-- .../torii/graphql/src/tests/metadata_test.rs | 6 ++--- crates/torii/graphql/src/tests/mod.rs | 4 ++-- .../graphql/src/tests/subscription_test.rs | 10 ++++---- crates/torii/libp2p/src/tests.rs | 2 +- 8 files changed, 33 insertions(+), 26 deletions(-) diff --git a/crates/torii/core/src/engine.rs b/crates/torii/core/src/engine.rs index 40cb52e86c..c54e641a9b 100644 --- a/crates/torii/core/src/engine.rs +++ b/crates/torii/core/src/engine.rs @@ -179,7 +179,7 @@ impl Engine
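This patch threads a tokio oneshot sender through the execute path so a caller can wait until the buffered transaction has actually been committed. Stripped of the torii-specific types, the request/acknowledge shape is roughly the following (an illustrative sketch; Message, worker and caller are made-up stand-ins, not items from this crate):

    use tokio::sync::{mpsc, oneshot};

    #[derive(Debug)]
    enum Message {
        Work(String),
        Flush(oneshot::Sender<anyhow::Result<()>>),
    }

    // Buffers work items and only "commits" them when asked, acknowledging the caller.
    async fn worker(mut rx: mpsc::UnboundedReceiver<Message>) {
        let mut buffered = Vec::new();
        while let Some(msg) = rx.recv().await {
            match msg {
                Message::Work(item) => buffered.push(item),
                Message::Flush(ack) => {
                    buffered.clear(); // stand-in for committing the transaction
                    let _ = ack.send(Ok(()));
                }
            }
        }
    }

    async fn caller(tx: mpsc::UnboundedSender<Message>) -> anyhow::Result<()> {
        tx.send(Message::Work("INSERT ...".to_string()))?;
        let (ack_tx, ack_rx) = oneshot::channel();
        tx.send(Message::Flush(ack_tx))?;
        ack_rx.await? // resolves once the worker has flushed
    }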

{ } match self.process(fetch_result).await { - Ok(()) => self.db.execute()?, + Ok(()) => self.db.execute().await?, Err(e) => { error!(target: LOG_TARGET, error = %e, "Processing fetched data."); erroring_out = true; diff --git a/crates/torii/core/src/executor.rs b/crates/torii/core/src/executor.rs index 599f594c1b..c150efabf0 100644 --- a/crates/torii/core/src/executor.rs +++ b/crates/torii/core/src/executor.rs @@ -9,6 +9,7 @@ use sqlx::{FromRow, Pool, Sqlite, Transaction}; use starknet::core::types::Felt; use tokio::sync::broadcast::{Receiver, Sender}; use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}; +use tokio::sync::oneshot; use crate::simple_broker::SimpleBroker; use crate::types::{ @@ -41,14 +42,14 @@ pub struct DeleteEntityQuery { pub ty: Ty, } -#[derive(Debug, Clone)] +#[derive(Debug)] pub enum QueryType { SetEntity(Ty), DeleteEntity(DeleteEntityQuery), EventMessage(Ty), RegisterModel, StoreEvent, - Execute, + Execute(oneshot::Sender>), Other, } @@ -61,7 +62,7 @@ pub struct Executor<'c> { shutdown_rx: Receiver<()>, } -#[derive(Debug, Clone)] +#[derive(Debug)] pub struct QueryMessage { pub statement: String, pub arguments: Vec, @@ -77,8 +78,8 @@ impl QueryMessage { Self { statement, arguments, query_type: QueryType::Other } } - pub fn execute() -> Self { - Self { statement: "".to_string(), arguments: vec![], query_type: QueryType::Execute } + pub fn execute(sender: oneshot::Sender>) -> Self { + Self { statement: "".to_string(), arguments: vec![], query_type: QueryType::Execute(sender) } } } @@ -99,6 +100,7 @@ impl<'c> Executor<'c> { loop { tokio::select! { _ = self.shutdown_rx.recv() => { + println!("Shutting down executor"); break Ok(()); } Some(msg) = self.rx.recv() => { @@ -204,8 +206,8 @@ impl<'c> Executor<'c> { let event = EventEmitted::from_row(&row)?; self.publish_queue.push_back(BrokerMessage::EventEmitted(event)); } - QueryType::Execute => { - self.execute().await?; + QueryType::Execute(sender) => { + self.execute(sender).await?; } QueryType::Other => { query.execute(&mut **tx).await.with_context(|| { @@ -217,7 +219,7 @@ impl<'c> Executor<'c> { Ok(()) } - async fn execute(&mut self) -> Result<()> { + async fn execute(&mut self, sender: oneshot::Sender>) -> Result<()> { let transaction = mem::replace(&mut self.transaction, self.pool.begin().await?); transaction.commit().await?; @@ -225,7 +227,10 @@ impl<'c> Executor<'c> { send_broker_message(message); } - Ok(()) + match sender.send(Ok(())) { + Ok(()) => Ok(()), + Err(_) => Err(anyhow::anyhow!("Failed to send result to sender")), + } } } diff --git a/crates/torii/core/src/sql.rs b/crates/torii/core/src/sql.rs index a8d179a1a4..de493b4859 100644 --- a/crates/torii/core/src/sql.rs +++ b/crates/torii/core/src/sql.rs @@ -13,6 +13,7 @@ use sqlx::{Pool, Sqlite}; use starknet::core::types::{Event, Felt, InvokeTransaction, Transaction}; use starknet_crypto::poseidon_hash_many; use tokio::sync::mpsc::UnboundedSender; +use tokio::sync::oneshot; use crate::cache::{Model, ModelCache}; use crate::executor::{Argument, DeleteEntityQuery, QueryMessage, QueryType}; @@ -1126,9 +1127,10 @@ impl Sql { Ok(()) } - pub fn execute(&self) -> Result<()> { - self.executor.send(QueryMessage::execute())?; - Ok(()) + pub async fn execute(&self) -> Result<()> { + let (sender, receiver) = oneshot::channel(); + self.executor.send(QueryMessage::execute(sender))?; + receiver.await? 
} } diff --git a/crates/torii/core/src/sql_test.rs b/crates/torii/core/src/sql_test.rs index ec5433cb15..4d06e2365f 100644 --- a/crates/torii/core/src/sql_test.rs +++ b/crates/torii/core/src/sql_test.rs @@ -20,7 +20,7 @@ use starknet_crypto::poseidon_hash_many; use tokio::sync::broadcast; use crate::engine::{Engine, EngineConfig, Processors}; -use crate::executor::{Executor, QueryMessage}; +use crate::executor::Executor; use crate::processors::generate_event_processors_map; use crate::processors::register_model::RegisterModelProcessor; use crate::processors::store_del_record::StoreDelRecordProcessor; @@ -61,7 +61,7 @@ where let data = engine.fetch_range(0, to, None).await.unwrap(); engine.process_range(data).await.unwrap(); - db.executor.send(QueryMessage::execute())?; + db.execute().await.unwrap(); Ok(engine) } diff --git a/crates/torii/graphql/src/tests/metadata_test.rs b/crates/torii/graphql/src/tests/metadata_test.rs index cba59b81a7..35e039d516 100644 --- a/crates/torii/graphql/src/tests/metadata_test.rs +++ b/crates/torii/graphql/src/tests/metadata_test.rs @@ -80,7 +80,7 @@ mod tests { db.set_metadata(&RESOURCE, URI, BLOCK_TIMESTAMP).unwrap(); db.update_metadata(&RESOURCE, URI, &world_metadata, &None, &Some(cover_img.to_string())) .unwrap(); - db.execute().unwrap(); + db.execute().await.unwrap(); let result = run_graphql_query(&schema, QUERY).await; let value = result.get("metadatas").ok_or("metadatas not found").unwrap().clone(); @@ -108,7 +108,7 @@ mod tests { #[sqlx::test(migrations = "../migrations")] async fn test_empty_content(pool: SqlitePool) { let (shutdown_tx, _) = broadcast::channel(1); - let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx).await.unwrap(); + let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); @@ -116,7 +116,7 @@ mod tests { let schema = build_schema(&pool).await.unwrap(); db.set_metadata(&RESOURCE, URI, BLOCK_TIMESTAMP).unwrap(); - db.execute().unwrap(); + db.execute().await.unwrap(); let result = run_graphql_query(&schema, QUERY).await; let value = result.get("metadatas").ok_or("metadatas not found").unwrap().clone(); diff --git a/crates/torii/graphql/src/tests/mod.rs b/crates/torii/graphql/src/tests/mod.rs index 47d377a169..ad7308dc40 100644 --- a/crates/torii/graphql/src/tests/mod.rs +++ b/crates/torii/graphql/src/tests/mod.rs @@ -272,7 +272,7 @@ pub async fn model_fixtures(db: &mut Sql) { .await .unwrap(); - db.execute().unwrap(); + db.execute().await.unwrap(); } pub async fn spinup_types_test() -> Result { @@ -380,6 +380,6 @@ pub async fn spinup_types_test() -> Result { let to = account.provider().block_hash_and_number().await?.block_number; let data = engine.fetch_range(0, to, None).await.unwrap(); engine.process_range(data).await.unwrap(); - db.execute().unwrap(); + db.execute().await.unwrap(); Ok(pool) } diff --git a/crates/torii/graphql/src/tests/subscription_test.rs b/crates/torii/graphql/src/tests/subscription_test.rs index 40e74811af..cfb2dbff7a 100644 --- a/crates/torii/graphql/src/tests/subscription_test.rs +++ b/crates/torii/graphql/src/tests/subscription_test.rs @@ -125,7 +125,7 @@ mod tests { ) .await .unwrap(); - db.execute().unwrap(); + db.execute().await.unwrap(); tx.send(()).await.unwrap(); }); @@ -248,7 +248,7 @@ mod tests { ) .await .unwrap(); - db.execute().unwrap(); + db.execute().await.unwrap(); tx.send(()).await.unwrap(); }); @@ -325,7 +325,7 @@ mod tests { ) .await .unwrap(); - db.execute().unwrap(); + 
db.execute().await.unwrap(); // 3. fn publish() is called from state.set_entity() @@ -394,7 +394,7 @@ mod tests { ) .await .unwrap(); - db.execute().unwrap(); + db.execute().await.unwrap(); // 3. fn publish() is called from state.set_entity() tx.send(()).await.unwrap(); @@ -451,7 +451,7 @@ mod tests { block_timestamp, ) .unwrap(); - db.execute().unwrap(); + db.execute().await.unwrap(); tx.send(()).await.unwrap(); }); diff --git a/crates/torii/libp2p/src/tests.rs b/crates/torii/libp2p/src/tests.rs index 08b52f8ca6..99068feccf 100644 --- a/crates/torii/libp2p/src/tests.rs +++ b/crates/torii/libp2p/src/tests.rs @@ -595,7 +595,7 @@ mod test { ) .await .unwrap(); - db.execute().unwrap(); + db.execute().await.unwrap(); // Initialize the relay server let mut relay_server = Relay::new(db, provider, 9900, 9901, 9902, None, None)?; From c13ff59376d2f2780c60181a22a4abcb0a4d68e5 Mon Sep 17 00:00:00 2001 From: Nasr Date: Wed, 25 Sep 2024 14:00:35 -0400 Subject: [PATCH 15/51] fmt --- crates/torii/core/src/executor.rs | 6 +++++- crates/torii/graphql/src/tests/metadata_test.rs | 3 ++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/crates/torii/core/src/executor.rs b/crates/torii/core/src/executor.rs index c150efabf0..a51575f88a 100644 --- a/crates/torii/core/src/executor.rs +++ b/crates/torii/core/src/executor.rs @@ -79,7 +79,11 @@ impl QueryMessage { } pub fn execute(sender: oneshot::Sender>) -> Self { - Self { statement: "".to_string(), arguments: vec![], query_type: QueryType::Execute(sender) } + Self { + statement: "".to_string(), + arguments: vec![], + query_type: QueryType::Execute(sender), + } } } diff --git a/crates/torii/graphql/src/tests/metadata_test.rs b/crates/torii/graphql/src/tests/metadata_test.rs index 35e039d516..03c2098deb 100644 --- a/crates/torii/graphql/src/tests/metadata_test.rs +++ b/crates/torii/graphql/src/tests/metadata_test.rs @@ -108,7 +108,8 @@ mod tests { #[sqlx::test(migrations = "../migrations")] async fn test_empty_content(pool: SqlitePool) { let (shutdown_tx, _) = broadcast::channel(1); - let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); + let (mut executor, sender) = + Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); From 7fc27d538e5a5da75a24687a0e8e737c16da5330 Mon Sep 17 00:00:00 2001 From: Nasr Date: Wed, 25 Sep 2024 14:02:34 -0400 Subject: [PATCH 16/51] clone shutdown tx --- crates/torii/core/src/sql_test.rs | 6 +++--- crates/torii/graphql/src/tests/metadata_test.rs | 2 +- crates/torii/graphql/src/tests/mod.rs | 2 +- crates/torii/graphql/src/tests/subscription_test.rs | 10 +++++----- crates/torii/grpc/src/server/tests/entities_test.rs | 2 +- crates/torii/libp2p/src/tests.rs | 2 +- 6 files changed, 12 insertions(+), 12 deletions(-) diff --git a/crates/torii/core/src/sql_test.rs b/crates/torii/core/src/sql_test.rs index 4d06e2365f..dcc95fca75 100644 --- a/crates/torii/core/src/sql_test.rs +++ b/crates/torii/core/src/sql_test.rs @@ -140,7 +140,7 @@ async fn test_load_from_remote(sequencer: &RunnerCtx) { let world_reader = WorldContractReader::new(strat.world_address, Arc::clone(&provider)); let (shutdown_tx, _) = broadcast::channel(1); - let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx).await.unwrap(); + let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); @@ -302,7 +302,7 @@ async fn 
test_load_from_remote_del(sequencer: &RunnerCtx) { let world_reader = WorldContractReader::new(strat.world_address, Arc::clone(&provider)); let (shutdown_tx, _) = broadcast::channel(1); - let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx).await.unwrap(); + let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); @@ -393,7 +393,7 @@ async fn test_update_with_set_record(sequencer: &RunnerCtx) { let world_reader = WorldContractReader::new(strat.world_address, Arc::clone(&provider)); let (shutdown_tx, _) = broadcast::channel(1); - let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx).await.unwrap(); + let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); diff --git a/crates/torii/graphql/src/tests/metadata_test.rs b/crates/torii/graphql/src/tests/metadata_test.rs index 03c2098deb..bd303a37a3 100644 --- a/crates/torii/graphql/src/tests/metadata_test.rs +++ b/crates/torii/graphql/src/tests/metadata_test.rs @@ -51,7 +51,7 @@ mod tests { #[sqlx::test(migrations = "../migrations")] async fn test_metadata(pool: SqlitePool) { let (shutdown_tx, _) = broadcast::channel(1); - let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx).await.unwrap(); + let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); diff --git a/crates/torii/graphql/src/tests/mod.rs b/crates/torii/graphql/src/tests/mod.rs index ad7308dc40..374bb4c0d6 100644 --- a/crates/torii/graphql/src/tests/mod.rs +++ b/crates/torii/graphql/src/tests/mod.rs @@ -352,7 +352,7 @@ pub async fn spinup_types_test() -> Result { let world = WorldContractReader::new(strat.world_address, Arc::clone(&provider)); let (shutdown_tx, _) = broadcast::channel(1); - let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx).await.unwrap(); + let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); diff --git a/crates/torii/graphql/src/tests/subscription_test.rs b/crates/torii/graphql/src/tests/subscription_test.rs index cfb2dbff7a..8c090a661d 100644 --- a/crates/torii/graphql/src/tests/subscription_test.rs +++ b/crates/torii/graphql/src/tests/subscription_test.rs @@ -23,7 +23,7 @@ mod tests { #[serial] async fn test_entity_subscription(pool: SqlitePool) { let (shutdown_tx, _) = broadcast::channel(1); - let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx).await.unwrap(); + let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); @@ -163,7 +163,7 @@ mod tests { #[serial] async fn test_entity_subscription_with_id(pool: SqlitePool) { let (shutdown_tx, _) = broadcast::channel(1); - let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx).await.unwrap(); + let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); @@ -283,7 +283,7 @@ mod tests { #[serial] async fn test_model_subscription(pool: SqlitePool) { let (shutdown_tx, _) = broadcast::channel(1); - let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx).await.unwrap(); + let (mut executor, sender) = Executor::new(pool.clone(), 
shutdown_tx.clone()).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); @@ -353,7 +353,7 @@ mod tests { #[serial] async fn test_model_subscription_with_id(pool: SqlitePool) { let (shutdown_tx, _) = broadcast::channel(1); - let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx).await.unwrap(); + let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); @@ -424,7 +424,7 @@ mod tests { #[serial] async fn test_event_emitted(pool: SqlitePool) { let (shutdown_tx, _) = broadcast::channel(1); - let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx).await.unwrap(); + let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); diff --git a/crates/torii/grpc/src/server/tests/entities_test.rs b/crates/torii/grpc/src/server/tests/entities_test.rs index ee85900cd1..924517d163 100644 --- a/crates/torii/grpc/src/server/tests/entities_test.rs +++ b/crates/torii/grpc/src/server/tests/entities_test.rs @@ -94,7 +94,7 @@ async fn test_entities_queries(sequencer: &RunnerCtx) { TransactionWaiter::new(tx.transaction_hash, &provider).await.unwrap(); let (shutdown_tx, _) = broadcast::channel(1); - let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx).await.unwrap(); + let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); diff --git a/crates/torii/libp2p/src/tests.rs b/crates/torii/libp2p/src/tests.rs index 99068feccf..23cc5648bb 100644 --- a/crates/torii/libp2p/src/tests.rs +++ b/crates/torii/libp2p/src/tests.rs @@ -562,7 +562,7 @@ mod test { let account = sequencer.account_data(0); let (shutdown_tx, _) = broadcast::channel(1); - let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx).await.unwrap(); + let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); From 260845c4e4e252a7c084c69a83ee91718003c01c Mon Sep 17 00:00:00 2001 From: Nasr Date: Wed, 25 Sep 2024 14:02:54 -0400 Subject: [PATCH 17/51] fmt --- crates/torii/graphql/src/tests/metadata_test.rs | 3 ++- .../torii/graphql/src/tests/subscription_test.rs | 15 ++++++++++----- crates/torii/libp2p/src/tests.rs | 3 ++- 3 files changed, 14 insertions(+), 7 deletions(-) diff --git a/crates/torii/graphql/src/tests/metadata_test.rs b/crates/torii/graphql/src/tests/metadata_test.rs index bd303a37a3..8c5e3dff63 100644 --- a/crates/torii/graphql/src/tests/metadata_test.rs +++ b/crates/torii/graphql/src/tests/metadata_test.rs @@ -51,7 +51,8 @@ mod tests { #[sqlx::test(migrations = "../migrations")] async fn test_metadata(pool: SqlitePool) { let (shutdown_tx, _) = broadcast::channel(1); - let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); + let (mut executor, sender) = + Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); diff --git a/crates/torii/graphql/src/tests/subscription_test.rs b/crates/torii/graphql/src/tests/subscription_test.rs index 8c090a661d..2e32e0c194 100644 --- a/crates/torii/graphql/src/tests/subscription_test.rs +++ b/crates/torii/graphql/src/tests/subscription_test.rs @@ -23,7 +23,8 @@ mod tests { #[serial] async fn test_entity_subscription(pool: SqlitePool) { let 
(shutdown_tx, _) = broadcast::channel(1); - let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); + let (mut executor, sender) = + Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); @@ -163,7 +164,8 @@ mod tests { #[serial] async fn test_entity_subscription_with_id(pool: SqlitePool) { let (shutdown_tx, _) = broadcast::channel(1); - let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); + let (mut executor, sender) = + Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); @@ -283,7 +285,8 @@ mod tests { #[serial] async fn test_model_subscription(pool: SqlitePool) { let (shutdown_tx, _) = broadcast::channel(1); - let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); + let (mut executor, sender) = + Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); @@ -353,7 +356,8 @@ mod tests { #[serial] async fn test_model_subscription_with_id(pool: SqlitePool) { let (shutdown_tx, _) = broadcast::channel(1); - let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); + let (mut executor, sender) = + Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); @@ -424,7 +428,8 @@ mod tests { #[serial] async fn test_event_emitted(pool: SqlitePool) { let (shutdown_tx, _) = broadcast::channel(1); - let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); + let (mut executor, sender) = + Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); diff --git a/crates/torii/libp2p/src/tests.rs b/crates/torii/libp2p/src/tests.rs index 23cc5648bb..cdd71d5ff3 100644 --- a/crates/torii/libp2p/src/tests.rs +++ b/crates/torii/libp2p/src/tests.rs @@ -562,7 +562,8 @@ mod test { let account = sequencer.account_data(0); let (shutdown_tx, _) = broadcast::channel(1); - let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); + let (mut executor, sender) = + Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); tokio::spawn(async move { executor.run().await.unwrap(); }); From a7e4f1f2b64949269f672e0444adf0ba9fe3648d Mon Sep 17 00:00:00 2001 From: Nasr Date: Wed, 25 Sep 2024 17:35:53 -0400 Subject: [PATCH 18/51] fix: test exec --- crates/torii/core/src/executor.rs | 11 +++++------ crates/torii/grpc/src/server/tests/entities_test.rs | 2 ++ 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/crates/torii/core/src/executor.rs b/crates/torii/core/src/executor.rs index a51575f88a..03451259b0 100644 --- a/crates/torii/core/src/executor.rs +++ b/crates/torii/core/src/executor.rs @@ -211,7 +211,9 @@ impl<'c> Executor<'c> { self.publish_queue.push_back(BrokerMessage::EventEmitted(event)); } QueryType::Execute(sender) => { - self.execute(sender).await?; + sender + .send(self.execute().await) + .map_err(|_| anyhow::anyhow!("Failed to send execute result"))?; } QueryType::Other => { query.execute(&mut **tx).await.with_context(|| { @@ -223,7 +225,7 @@ impl<'c> Executor<'c> { Ok(()) } - async fn execute(&mut self, sender: oneshot::Sender>) -> Result<()> { + async fn execute(&mut self) -> Result<()> { let transaction = mem::replace(&mut 
self.transaction, self.pool.begin().await?); transaction.commit().await?; @@ -231,10 +233,7 @@ impl<'c> Executor<'c> { send_broker_message(message); } - match sender.send(Ok(())) { - Ok(()) => Ok(()), - Err(_) => Err(anyhow::anyhow!("Failed to send result to sender")), - } + Ok(()) } } diff --git a/crates/torii/grpc/src/server/tests/entities_test.rs b/crates/torii/grpc/src/server/tests/entities_test.rs index 924517d163..6ef223cb26 100644 --- a/crates/torii/grpc/src/server/tests/entities_test.rs +++ b/crates/torii/grpc/src/server/tests/entities_test.rs @@ -122,6 +122,8 @@ async fn test_entities_queries(sequencer: &RunnerCtx) { let data = engine.fetch_range(0, to, None).await.unwrap(); engine.process_range(data).await.unwrap(); + db.execute().await.unwrap(); + let (_, receiver) = tokio::sync::mpsc::channel(1); let grpc = DojoWorld::new(db.pool, receiver, strat.world_address, provider.clone()); From 8cf445269dd509db88f50119a5868f6268f36ec8 Mon Sep 17 00:00:00 2001 From: Nasr Date: Wed, 25 Sep 2024 17:59:24 -0400 Subject: [PATCH 19/51] non bloking execute engine --- crates/torii/core/src/engine.rs | 3 ++- crates/torii/core/src/executor.rs | 45 ++++++++++++++++++++++--------- crates/torii/core/src/sql.rs | 7 +++-- 3 files changed, 37 insertions(+), 18 deletions(-) diff --git a/crates/torii/core/src/engine.rs b/crates/torii/core/src/engine.rs index c54e641a9b..54f16af359 100644 --- a/crates/torii/core/src/engine.rs +++ b/crates/torii/core/src/engine.rs @@ -21,6 +21,7 @@ use tokio::sync::Semaphore; use tokio::time::{sleep, Instant}; use tracing::{debug, error, info, trace, warn}; +use crate::executor::QueryMessage; use crate::processors::event_message::EventMessageProcessor; use crate::processors::{BlockProcessor, EventProcessor, TransactionProcessor}; use crate::sql::Sql; @@ -179,7 +180,7 @@ impl Engine

{ } match self.process(fetch_result).await { - Ok(()) => self.db.execute().await?, + Ok(()) => self.db.executor.send(QueryMessage::execute())?, Err(e) => { error!(target: LOG_TARGET, error = %e, "Processing fetched data."); erroring_out = true; diff --git a/crates/torii/core/src/executor.rs b/crates/torii/core/src/executor.rs index 03451259b0..3a238cea06 100644 --- a/crates/torii/core/src/executor.rs +++ b/crates/torii/core/src/executor.rs @@ -49,7 +49,7 @@ pub enum QueryType { EventMessage(Ty), RegisterModel, StoreEvent, - Execute(oneshot::Sender>), + Execute, Other, } @@ -67,23 +67,40 @@ pub struct QueryMessage { pub statement: String, pub arguments: Vec, pub query_type: QueryType, + tx: Option>>, } impl QueryMessage { pub fn new(statement: String, arguments: Vec, query_type: QueryType) -> Self { - Self { statement, arguments, query_type } + Self { statement, arguments, query_type, tx: None } + } + + pub fn new_recv(statement: String, arguments: Vec, query_type: QueryType) -> (Self, oneshot::Receiver>) { + let (tx, rx) = oneshot::channel(); + (Self { statement, arguments, query_type, tx: Some(tx) }, rx) } pub fn other(statement: String, arguments: Vec) -> Self { - Self { statement, arguments, query_type: QueryType::Other } + Self { statement, arguments, query_type: QueryType::Other, tx: None } + } + + pub fn other_recv(statement: String, arguments: Vec) -> (Self, oneshot::Receiver>) { + let (tx, rx) = oneshot::channel(); + (Self { statement, arguments, query_type: QueryType::Other, tx: Some(tx) }, rx) } - pub fn execute(sender: oneshot::Sender>) -> Self { - Self { + pub fn execute() -> Self { + Self { statement: "".to_string(), arguments: vec![], query_type: QueryType::Execute, tx: None } + } + + pub fn execute_recv() -> (Self, oneshot::Receiver>) { + let (tx, rx) = oneshot::channel(); + (Self { statement: "".to_string(), arguments: vec![], - query_type: QueryType::Execute(sender), - } + query_type: QueryType::Execute, + tx: Some(tx), + }, rx) } } @@ -108,7 +125,7 @@ impl<'c> Executor<'c> { break Ok(()); } Some(msg) = self.rx.recv() => { - let QueryMessage { statement, arguments, query_type } = msg; + let QueryMessage { statement, arguments, query_type, tx } = msg; let mut query = sqlx::query(&statement); for arg in &arguments { @@ -121,7 +138,7 @@ impl<'c> Executor<'c> { } } - self.handle_query_type(query, query_type, &statement, &arguments).await?; + self.handle_query_type(query, query_type, &statement, &arguments, tx).await?; } } } @@ -133,6 +150,7 @@ impl<'c> Executor<'c> { query_type: QueryType, statement: &str, arguments: &[Argument], + sender: Option>>, ) -> Result<()> { let tx = &mut self.transaction; @@ -210,10 +228,11 @@ impl<'c> Executor<'c> { let event = EventEmitted::from_row(&row)?; self.publish_queue.push_back(BrokerMessage::EventEmitted(event)); } - QueryType::Execute(sender) => { - sender - .send(self.execute().await) - .map_err(|_| anyhow::anyhow!("Failed to send execute result"))?; + QueryType::Execute => { + let res = self.execute().await; + if let Some(sender) = sender { + sender.send(res).map_err(|_| anyhow::anyhow!("Failed to send execute result"))?; + } } QueryType::Other => { query.execute(&mut **tx).await.with_context(|| { diff --git a/crates/torii/core/src/sql.rs b/crates/torii/core/src/sql.rs index de493b4859..7bee0d10ac 100644 --- a/crates/torii/core/src/sql.rs +++ b/crates/torii/core/src/sql.rs @@ -13,7 +13,6 @@ use sqlx::{Pool, Sqlite}; use starknet::core::types::{Event, Felt, InvokeTransaction, Transaction}; use starknet_crypto::poseidon_hash_many; use 
tokio::sync::mpsc::UnboundedSender; -use tokio::sync::oneshot; use crate::cache::{Model, ModelCache}; use crate::executor::{Argument, DeleteEntityQuery, QueryMessage, QueryType}; @@ -1128,9 +1127,9 @@ impl Sql { } pub async fn execute(&self) -> Result<()> { - let (sender, receiver) = oneshot::channel(); - self.executor.send(QueryMessage::execute(sender))?; - receiver.await? + let (execute, recv) = QueryMessage::execute_recv(); + self.executor.send(execute)?; + recv.await? } } From 2bcf226c5188f1eabc2bf2d9a19ab25c1ac7d6e6 Mon Sep 17 00:00:00 2001 From: Nasr Date: Wed, 25 Sep 2024 18:09:18 -0400 Subject: [PATCH 20/51] executor logs --- crates/torii/core/src/executor.rs | 56 ++++++++++++++++++++++++------- 1 file changed, 44 insertions(+), 12 deletions(-) diff --git a/crates/torii/core/src/executor.rs b/crates/torii/core/src/executor.rs index 3a238cea06..4abb381327 100644 --- a/crates/torii/core/src/executor.rs +++ b/crates/torii/core/src/executor.rs @@ -10,6 +10,8 @@ use starknet::core::types::Felt; use tokio::sync::broadcast::{Receiver, Sender}; use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender}; use tokio::sync::oneshot; +use tokio::time::Instant; +use tracing::{debug, error}; use crate::simple_broker::SimpleBroker; use crate::types::{ @@ -17,6 +19,8 @@ use crate::types::{ Model as ModelRegistered, }; +pub(crate) const LOG_TARGET: &str = "torii_core::executor"; + #[derive(Debug, Clone)] pub enum Argument { Null, @@ -42,7 +46,7 @@ pub struct DeleteEntityQuery { pub ty: Ty, } -#[derive(Debug)] +#[derive(Debug, Clone)] pub enum QueryType { SetEntity(Ty), DeleteEntity(DeleteEntityQuery), @@ -75,7 +79,11 @@ impl QueryMessage { Self { statement, arguments, query_type, tx: None } } - pub fn new_recv(statement: String, arguments: Vec, query_type: QueryType) -> (Self, oneshot::Receiver>) { + pub fn new_recv( + statement: String, + arguments: Vec, + query_type: QueryType, + ) -> (Self, oneshot::Receiver>) { let (tx, rx) = oneshot::channel(); (Self { statement, arguments, query_type, tx: Some(tx) }, rx) } @@ -84,23 +92,34 @@ impl QueryMessage { Self { statement, arguments, query_type: QueryType::Other, tx: None } } - pub fn other_recv(statement: String, arguments: Vec) -> (Self, oneshot::Receiver>) { + pub fn other_recv( + statement: String, + arguments: Vec, + ) -> (Self, oneshot::Receiver>) { let (tx, rx) = oneshot::channel(); (Self { statement, arguments, query_type: QueryType::Other, tx: Some(tx) }, rx) } pub fn execute() -> Self { - Self { statement: "".to_string(), arguments: vec![], query_type: QueryType::Execute, tx: None } + Self { + statement: "".to_string(), + arguments: vec![], + query_type: QueryType::Execute, + tx: None, + } } pub fn execute_recv() -> (Self, oneshot::Receiver>) { let (tx, rx) = oneshot::channel(); - (Self { - statement: "".to_string(), - arguments: vec![], - query_type: QueryType::Execute, - tx: Some(tx), - }, rx) + ( + Self { + statement: "".to_string(), + arguments: vec![], + query_type: QueryType::Execute, + tx: Some(tx), + }, + rx, + ) } } @@ -138,7 +157,12 @@ impl<'c> Executor<'c> { } } - self.handle_query_type(query, query_type, &statement, &arguments, tx).await?; + match self.handle_query_type(query, query_type.clone(), &statement, &arguments, tx).await { + Ok(()) => {}, + Err(e) => { + error!(target: LOG_TARGET, r#type = ?query_type, error = %e, "Failed to execute query."); + } + } } } } @@ -229,9 +253,17 @@ impl<'c> Executor<'c> { self.publish_queue.push_back(BrokerMessage::EventEmitted(event)); } QueryType::Execute => { + 
debug!(target: LOG_TARGET, "Executing query."); + let instant = Instant::now(); let res = self.execute().await; + debug!(target: LOG_TARGET, duration = ?instant.elapsed(), "Executed query."); + if let Some(sender) = sender { - sender.send(res).map_err(|_| anyhow::anyhow!("Failed to send execute result"))?; + sender + .send(res) + .map_err(|_| anyhow::anyhow!("Failed to send execute result"))?; + } else { + res?; } } QueryType::Other => { From 3242ac4d7aa01f5bddd88a5d639f7d8551805b94 Mon Sep 17 00:00:00 2001 From: Nasr Date: Wed, 25 Sep 2024 18:22:37 -0400 Subject: [PATCH 21/51] in memory head --- crates/torii/core/src/engine.rs | 41 ++++++++++++++++++++++----------- 1 file changed, 27 insertions(+), 14 deletions(-) diff --git a/crates/torii/core/src/engine.rs b/crates/torii/core/src/engine.rs index 54f16af359..04bfbbc917 100644 --- a/crates/torii/core/src/engine.rs +++ b/crates/torii/core/src/engine.rs @@ -109,6 +109,13 @@ pub struct ParallelizedEvent { pub event: Event, } +#[derive(Debug)] +pub struct EngineHead { + pub block_number: u64, + pub last_pending_block_tx: Option, + pub last_pending_block_world_tx: Option, +} + #[allow(missing_debug_implementations)] pub struct Engine { world: Arc>, @@ -150,7 +157,7 @@ impl Engine

{ pub async fn start(&mut self) -> Result<()> { // use the start block provided by user if head is 0 - let (head, _, _) = self.db.head().await?; + let (mut head, mut last_pending_block_tx, mut last_pending_block_world_tx) = self.db.head().await?; if head == 0 { self.db.set_head(self.config.start_block)?; } else if self.config.start_block != 0 { @@ -164,7 +171,6 @@ impl Engine
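Taken together with the earlier executor commits, the write path is now driven like this from a caller's point of view: spawn Executor::run on its own task, queue statements through the returned unbounded sender, and use QueryMessage::execute_recv when the caller needs to wait for the batch to be committed (the engine's own loop sends the plain QueryMessage::execute and does not wait). A minimal usage sketch, assuming the Executor, QueryMessage and Argument types from crates/torii/core/src/executor.rs; the pool URL, statement and argument values are placeholders:

    use sqlx::sqlite::SqlitePoolOptions;
    use tokio::sync::broadcast;
    use torii_core::executor::{Argument, Executor, QueryMessage};

    async fn usage_sketch() -> anyhow::Result<()> {
        let pool = SqlitePoolOptions::new().connect("sqlite::memory:").await?;
        let (shutdown_tx, _) = broadcast::channel(1);

        // `new` hands back the executor plus the sender used to queue queries.
        let (mut executor, sender) = Executor::new(pool, shutdown_tx.clone()).await?;
        tokio::spawn(async move { executor.run().await.unwrap() });

        // Queued statements are buffered inside the executor's open transaction.
        sender.send(QueryMessage::other(
            "UPDATE contracts SET head = ? WHERE id = ?".to_string(),
            vec![Argument::Int(42), Argument::String("0x0".to_string())],
        ))?;

        // Request a commit and await the oneshot receiver for its result.
        let (execute, recv) = QueryMessage::execute_recv();
        sender.send(execute)?;
        recv.await??;
        Ok(())
    }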

{ let mut erroring_out = false; loop { - let (head, last_pending_block_world_tx, last_pending_block_tx) = self.db.head().await?; tokio::select! { _ = shutdown_rx.recv() => { break Ok(()); @@ -180,7 +186,14 @@ impl Engine

{ } match self.process(fetch_result).await { - Ok(()) => self.db.executor.send(QueryMessage::execute())?, + Ok(res) => { + self.db.executor.send(QueryMessage::execute())?; + if let Some(new_head) = res { + head = new_head.block_number; + last_pending_block_tx = new_head.last_pending_block_tx; + last_pending_block_world_tx = new_head.last_pending_block_world_tx; + } + } Err(e) => { error!(target: LOG_TARGET, error = %e, "Processing fetched data."); erroring_out = true; @@ -364,21 +377,21 @@ impl Engine

{ })) } - pub async fn process(&mut self, fetch_result: FetchDataResult) -> Result<()> { + pub async fn process(&mut self, fetch_result: FetchDataResult) -> Result> { match fetch_result { FetchDataResult::Range(data) => { - self.process_range(data).await?; + self.process_range(data).await.map(Some) } FetchDataResult::Pending(data) => { - self.process_pending(data).await?; + self.process_pending(data).await.map(Some) + } + FetchDataResult::None => { + Ok(None) } - FetchDataResult::None => {} } - - Ok(()) } - pub async fn process_pending(&mut self, data: FetchPendingResult) -> Result<()> { + pub async fn process_pending(&mut self, data: FetchPendingResult) -> Result { // Skip transactions that have been processed already // Our cursor is the last processed transaction @@ -416,7 +429,7 @@ impl Engine

{ if let Some(tx) = last_pending_block_world_tx { self.db.set_last_pending_block_world_tx(Some(tx))?; } - return Ok(()); + return Ok(EngineHead { block_number: data.block_number - 1, last_pending_block_tx, last_pending_block_world_tx }); } _ => { error!(target: LOG_TARGET, error = %e, transaction_hash = %format!("{:#x}", transaction_hash), "Processing pending transaction."); @@ -451,10 +464,10 @@ impl Engine

{ self.db.set_last_pending_block_world_tx(Some(tx))?; } - Ok(()) + Ok(EngineHead { block_number: data.block_number - 1, last_pending_block_tx, last_pending_block_world_tx }) } - pub async fn process_range(&mut self, data: FetchRangeResult) -> Result<()> { + pub async fn process_range(&mut self, data: FetchRangeResult) -> Result { // Process all transactions let mut last_block = 0; for ((block_number, transaction_hash), events) in data.transactions { @@ -493,7 +506,7 @@ impl Engine

{ self.db.set_last_pending_block_world_tx(None)?; self.db.set_last_pending_block_tx(None)?; - Ok(()) + Ok(EngineHead { block_number: data.latest_block_number, last_pending_block_tx: None, last_pending_block_world_tx: None }) } async fn process_tasks(&mut self) -> Result<()> { From ef3e4ba1ae9eb4b2f07faa9136041dbaa4b2cc8f Mon Sep 17 00:00:00 2001 From: Nasr Date: Wed, 25 Sep 2024 18:22:48 -0400 Subject: [PATCH 22/51] fmt --- crates/torii/core/src/engine.rs | 33 ++++++++++++++++++++------------- 1 file changed, 20 insertions(+), 13 deletions(-) diff --git a/crates/torii/core/src/engine.rs b/crates/torii/core/src/engine.rs index 04bfbbc917..ef56ad12de 100644 --- a/crates/torii/core/src/engine.rs +++ b/crates/torii/core/src/engine.rs @@ -157,7 +157,8 @@ impl Engine

{ pub async fn start(&mut self) -> Result<()> { // use the start block provided by user if head is 0 - let (mut head, mut last_pending_block_tx, mut last_pending_block_world_tx) = self.db.head().await?; + let (mut head, mut last_pending_block_tx, mut last_pending_block_world_tx) = + self.db.head().await?; if head == 0 { self.db.set_head(self.config.start_block)?; } else if self.config.start_block != 0 { @@ -379,15 +380,9 @@ impl Engine

{ pub async fn process(&mut self, fetch_result: FetchDataResult) -> Result> { match fetch_result { - FetchDataResult::Range(data) => { - self.process_range(data).await.map(Some) - } - FetchDataResult::Pending(data) => { - self.process_pending(data).await.map(Some) - } - FetchDataResult::None => { - Ok(None) - } + FetchDataResult::Range(data) => self.process_range(data).await.map(Some), + FetchDataResult::Pending(data) => self.process_pending(data).await.map(Some), + FetchDataResult::None => Ok(None), } } @@ -429,7 +424,11 @@ impl Engine

{ if let Some(tx) = last_pending_block_world_tx { self.db.set_last_pending_block_world_tx(Some(tx))?; } - return Ok(EngineHead { block_number: data.block_number - 1, last_pending_block_tx, last_pending_block_world_tx }); + return Ok(EngineHead { + block_number: data.block_number - 1, + last_pending_block_tx, + last_pending_block_world_tx, + }); } _ => { error!(target: LOG_TARGET, error = %e, transaction_hash = %format!("{:#x}", transaction_hash), "Processing pending transaction."); @@ -464,7 +463,11 @@ impl Engine

{ self.db.set_last_pending_block_world_tx(Some(tx))?; } - Ok(EngineHead { block_number: data.block_number - 1, last_pending_block_tx, last_pending_block_world_tx }) + Ok(EngineHead { + block_number: data.block_number - 1, + last_pending_block_tx, + last_pending_block_world_tx, + }) } pub async fn process_range(&mut self, data: FetchRangeResult) -> Result { @@ -506,7 +509,11 @@ impl Engine

{ self.db.set_last_pending_block_world_tx(None)?; self.db.set_last_pending_block_tx(None)?; - Ok(EngineHead { block_number: data.latest_block_number, last_pending_block_tx: None, last_pending_block_world_tx: None }) + Ok(EngineHead { + block_number: data.latest_block_number, + last_pending_block_tx: None, + last_pending_block_world_tx: None, + }) } async fn process_tasks(&mut self) -> Result<()> { From 299c0b92a356f1d914957da401c166df16649529 Mon Sep 17 00:00:00 2001 From: Nasr Date: Fri, 27 Sep 2024 10:44:20 -0400 Subject: [PATCH 23/51] fix: tests --- bin/torii/src/main.rs | 2 +- crates/torii/core/src/sql_test.rs | 6 +++--- crates/torii/graphql/src/tests/mod.rs | 2 +- crates/torii/grpc/src/server/tests/entities_test.rs | 2 +- crates/torii/libp2p/src/tests.rs | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/bin/torii/src/main.rs b/bin/torii/src/main.rs index d27ad18fda..48bfd6dee4 100644 --- a/bin/torii/src/main.rs +++ b/bin/torii/src/main.rs @@ -65,7 +65,7 @@ struct Args { /// Database filepath (ex: indexer.db). If specified file doesn't exist, it will be /// created. Defaults to in-memory database - #[arg(short, long, default_value = ":memory:")] + #[arg(short, long, default_value = "")] database: String, /// Specify a block to start indexing from, ignored if stored head exists diff --git a/crates/torii/core/src/sql_test.rs b/crates/torii/core/src/sql_test.rs index dcc95fca75..9bca9f108b 100644 --- a/crates/torii/core/src/sql_test.rs +++ b/crates/torii/core/src/sql_test.rs @@ -70,7 +70,7 @@ where #[katana_runner::test(accounts = 10, db_dir = copy_spawn_and_move_db().as_str())] async fn test_load_from_remote(sequencer: &RunnerCtx) { let options = - SqliteConnectOptions::from_str("sqlite::memory:").unwrap().create_if_missing(true); + SqliteConnectOptions::from_str("").unwrap().create_if_missing(true); let pool = SqlitePoolOptions::new().max_connections(5).connect_with(options).await.unwrap(); sqlx::migrate!("../migrations").run(&pool).await.unwrap(); @@ -220,7 +220,7 @@ async fn test_load_from_remote(sequencer: &RunnerCtx) { #[katana_runner::test(accounts = 10, db_dir = copy_spawn_and_move_db().as_str())] async fn test_load_from_remote_del(sequencer: &RunnerCtx) { let options = - SqliteConnectOptions::from_str("sqlite::memory:").unwrap().create_if_missing(true); + SqliteConnectOptions::from_str("").unwrap().create_if_missing(true); let pool = SqlitePoolOptions::new().max_connections(5).connect_with(options).await.unwrap(); sqlx::migrate!("../migrations").run(&pool).await.unwrap(); @@ -323,7 +323,7 @@ async fn test_load_from_remote_del(sequencer: &RunnerCtx) { #[katana_runner::test(accounts = 10, db_dir = copy_spawn_and_move_db().as_str())] async fn test_update_with_set_record(sequencer: &RunnerCtx) { let options = - SqliteConnectOptions::from_str("sqlite::memory:").unwrap().create_if_missing(true); + SqliteConnectOptions::from_str("").unwrap().create_if_missing(true); let pool = SqlitePoolOptions::new().max_connections(5).connect_with(options).await.unwrap(); sqlx::migrate!("../migrations").run(&pool).await.unwrap(); diff --git a/crates/torii/graphql/src/tests/mod.rs b/crates/torii/graphql/src/tests/mod.rs index 374bb4c0d6..93def6b581 100644 --- a/crates/torii/graphql/src/tests/mod.rs +++ b/crates/torii/graphql/src/tests/mod.rs @@ -278,7 +278,7 @@ pub async fn model_fixtures(db: &mut Sql) { pub async fn spinup_types_test() -> Result { // change sqlite::memory: to sqlite:~/.test.db to dump database to disk let options = - 
SqliteConnectOptions::from_str("sqlite::memory:")?.create_if_missing(true).with_regexp(); + SqliteConnectOptions::from_str("")?.create_if_missing(true).with_regexp(); let pool = SqlitePoolOptions::new().max_connections(5).connect_with(options).await.unwrap(); sqlx::migrate!("../migrations").run(&pool).await.unwrap(); diff --git a/crates/torii/grpc/src/server/tests/entities_test.rs b/crates/torii/grpc/src/server/tests/entities_test.rs index 6ef223cb26..e6e5b03372 100644 --- a/crates/torii/grpc/src/server/tests/entities_test.rs +++ b/crates/torii/grpc/src/server/tests/entities_test.rs @@ -33,7 +33,7 @@ use crate::types::schema::Entity; #[tokio::test(flavor = "multi_thread")] #[katana_runner::test(accounts = 10, db_dir = copy_spawn_and_move_db().as_str())] async fn test_entities_queries(sequencer: &RunnerCtx) { - let options = SqliteConnectOptions::from_str("sqlite::memory:") + let options = SqliteConnectOptions::from_str("") .unwrap() .create_if_missing(true) .with_regexp(); diff --git a/crates/torii/libp2p/src/tests.rs b/crates/torii/libp2p/src/tests.rs index cdd71d5ff3..acb96518b8 100644 --- a/crates/torii/libp2p/src/tests.rs +++ b/crates/torii/libp2p/src/tests.rs @@ -549,7 +549,7 @@ mod test { .try_init(); // Database - let options = ::from_str("sqlite::memory:") + let options = ::from_str("") .unwrap() .create_if_missing(true); let pool = SqlitePoolOptions::new().max_connections(5).connect_with(options).await.unwrap(); From e4404f16adf1867d795d88a488092bf66de413c1 Mon Sep 17 00:00:00 2001 From: Nasr Date: Fri, 27 Sep 2024 10:46:18 -0400 Subject: [PATCH 24/51] fixx: libp2p --- crates/torii/libp2p/src/server/mod.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/crates/torii/libp2p/src/server/mod.rs b/crates/torii/libp2p/src/server/mod.rs index e69776c865..11f0489687 100644 --- a/crates/torii/libp2p/src/server/mod.rs +++ b/crates/torii/libp2p/src/server/mod.rs @@ -25,6 +25,7 @@ use starknet::core::types::{BlockId, BlockTag, Felt, FunctionCall}; use starknet::core::utils::get_selector_from_name; use starknet::providers::Provider; use starknet_crypto::poseidon_hash_many; +use torii_core::executor::QueryMessage; use torii_core::sql::{felts_sql_string, Sql}; use tracing::{info, warn}; use webrtc::tokio::Certificate; @@ -529,6 +530,7 @@ async fn set_entity( keys: &str, ) -> anyhow::Result<()> { db.set_entity(ty, message_id, block_timestamp, entity_id, model_id, Some(keys)).await?; + db.executor.send(QueryMessage::execute())?; Ok(()) } From 663234a6f30e7045d98ee3c0a70761b24e266fe0 Mon Sep 17 00:00:00 2001 From: Nasr Date: Fri, 27 Sep 2024 10:47:03 -0400 Subject: [PATCH 25/51] fmt --- crates/torii/core/src/sql_test.rs | 9 +++------ crates/torii/graphql/src/tests/mod.rs | 3 +-- crates/torii/grpc/src/server/tests/entities_test.rs | 5 +---- 3 files changed, 5 insertions(+), 12 deletions(-) diff --git a/crates/torii/core/src/sql_test.rs b/crates/torii/core/src/sql_test.rs index 9bca9f108b..2ab354017f 100644 --- a/crates/torii/core/src/sql_test.rs +++ b/crates/torii/core/src/sql_test.rs @@ -69,8 +69,7 @@ where #[tokio::test(flavor = "multi_thread")] #[katana_runner::test(accounts = 10, db_dir = copy_spawn_and_move_db().as_str())] async fn test_load_from_remote(sequencer: &RunnerCtx) { - let options = - SqliteConnectOptions::from_str("").unwrap().create_if_missing(true); + let options = SqliteConnectOptions::from_str("").unwrap().create_if_missing(true); let pool = SqlitePoolOptions::new().max_connections(5).connect_with(options).await.unwrap(); 
sqlx::migrate!("../migrations").run(&pool).await.unwrap(); @@ -219,8 +218,7 @@ async fn test_load_from_remote(sequencer: &RunnerCtx) { #[tokio::test(flavor = "multi_thread")] #[katana_runner::test(accounts = 10, db_dir = copy_spawn_and_move_db().as_str())] async fn test_load_from_remote_del(sequencer: &RunnerCtx) { - let options = - SqliteConnectOptions::from_str("").unwrap().create_if_missing(true); + let options = SqliteConnectOptions::from_str("").unwrap().create_if_missing(true); let pool = SqlitePoolOptions::new().max_connections(5).connect_with(options).await.unwrap(); sqlx::migrate!("../migrations").run(&pool).await.unwrap(); @@ -322,8 +320,7 @@ async fn test_load_from_remote_del(sequencer: &RunnerCtx) { #[tokio::test(flavor = "multi_thread")] #[katana_runner::test(accounts = 10, db_dir = copy_spawn_and_move_db().as_str())] async fn test_update_with_set_record(sequencer: &RunnerCtx) { - let options = - SqliteConnectOptions::from_str("").unwrap().create_if_missing(true); + let options = SqliteConnectOptions::from_str("").unwrap().create_if_missing(true); let pool = SqlitePoolOptions::new().max_connections(5).connect_with(options).await.unwrap(); sqlx::migrate!("../migrations").run(&pool).await.unwrap(); diff --git a/crates/torii/graphql/src/tests/mod.rs b/crates/torii/graphql/src/tests/mod.rs index 93def6b581..0a36bde6ca 100644 --- a/crates/torii/graphql/src/tests/mod.rs +++ b/crates/torii/graphql/src/tests/mod.rs @@ -277,8 +277,7 @@ pub async fn model_fixtures(db: &mut Sql) { pub async fn spinup_types_test() -> Result { // change sqlite::memory: to sqlite:~/.test.db to dump database to disk - let options = - SqliteConnectOptions::from_str("")?.create_if_missing(true).with_regexp(); + let options = SqliteConnectOptions::from_str("")?.create_if_missing(true).with_regexp(); let pool = SqlitePoolOptions::new().max_connections(5).connect_with(options).await.unwrap(); sqlx::migrate!("../migrations").run(&pool).await.unwrap(); diff --git a/crates/torii/grpc/src/server/tests/entities_test.rs b/crates/torii/grpc/src/server/tests/entities_test.rs index e6e5b03372..e999d9df74 100644 --- a/crates/torii/grpc/src/server/tests/entities_test.rs +++ b/crates/torii/grpc/src/server/tests/entities_test.rs @@ -33,10 +33,7 @@ use crate::types::schema::Entity; #[tokio::test(flavor = "multi_thread")] #[katana_runner::test(accounts = 10, db_dir = copy_spawn_and_move_db().as_str())] async fn test_entities_queries(sequencer: &RunnerCtx) { - let options = SqliteConnectOptions::from_str("") - .unwrap() - .create_if_missing(true) - .with_regexp(); + let options = SqliteConnectOptions::from_str("").unwrap().create_if_missing(true).with_regexp(); let pool = SqlitePoolOptions::new().max_connections(5).connect_with(options).await.unwrap(); sqlx::migrate!("../migrations").run(&pool).await.unwrap(); From 994abc561006f4ccaf174ef71404798715b86547 Mon Sep 17 00:00:00 2001 From: Nasr Date: Fri, 27 Sep 2024 12:15:17 -0400 Subject: [PATCH 26/51] try fix libp2p test --- crates/torii/libp2p/src/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/torii/libp2p/src/tests.rs b/crates/torii/libp2p/src/tests.rs index acb96518b8..b2cc99720f 100644 --- a/crates/torii/libp2p/src/tests.rs +++ b/crates/torii/libp2p/src/tests.rs @@ -552,7 +552,7 @@ mod test { let options = ::from_str("") .unwrap() .create_if_missing(true); - let pool = SqlitePoolOptions::new().max_connections(5).connect_with(options).await.unwrap(); + let pool = 
SqlitePoolOptions::new().min_connections(1).idle_timeout(None).max_lifetime(None).connect_with(options).await.unwrap(); sqlx::migrate!("../migrations").run(&pool).await.unwrap(); let sequencer = KatanaRunner::new().expect("Failed to create Katana sequencer"); From 65612facb23259fc91187269e4d66caf19e79e0e Mon Sep 17 00:00:00 2001 From: Nasr Date: Fri, 27 Sep 2024 12:31:33 -0400 Subject: [PATCH 27/51] fix tests --- crates/torii/core/src/sql_test.rs | 6 +++--- crates/torii/graphql/src/tests/mod.rs | 2 +- crates/torii/grpc/src/server/tests/entities_test.rs | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/crates/torii/core/src/sql_test.rs b/crates/torii/core/src/sql_test.rs index 2ab354017f..c4b7a6baf4 100644 --- a/crates/torii/core/src/sql_test.rs +++ b/crates/torii/core/src/sql_test.rs @@ -70,7 +70,7 @@ where #[katana_runner::test(accounts = 10, db_dir = copy_spawn_and_move_db().as_str())] async fn test_load_from_remote(sequencer: &RunnerCtx) { let options = SqliteConnectOptions::from_str("").unwrap().create_if_missing(true); - let pool = SqlitePoolOptions::new().max_connections(5).connect_with(options).await.unwrap(); + let pool = SqlitePoolOptions::new().min_connections(1).idle_timeout(None).max_lifetime(None).connect_with(options).await.unwrap(); sqlx::migrate!("../migrations").run(&pool).await.unwrap(); let setup = CompilerTestSetup::from_examples("../../dojo-core", "../../../examples/"); @@ -219,7 +219,7 @@ async fn test_load_from_remote(sequencer: &RunnerCtx) { #[katana_runner::test(accounts = 10, db_dir = copy_spawn_and_move_db().as_str())] async fn test_load_from_remote_del(sequencer: &RunnerCtx) { let options = SqliteConnectOptions::from_str("").unwrap().create_if_missing(true); - let pool = SqlitePoolOptions::new().max_connections(5).connect_with(options).await.unwrap(); + let pool = SqlitePoolOptions::new().min_connections(1).idle_timeout(None).max_lifetime(None).connect_with(options).await.unwrap(); sqlx::migrate!("../migrations").run(&pool).await.unwrap(); let setup = CompilerTestSetup::from_examples("../../dojo-core", "../../../examples/"); @@ -321,7 +321,7 @@ async fn test_load_from_remote_del(sequencer: &RunnerCtx) { #[katana_runner::test(accounts = 10, db_dir = copy_spawn_and_move_db().as_str())] async fn test_update_with_set_record(sequencer: &RunnerCtx) { let options = SqliteConnectOptions::from_str("").unwrap().create_if_missing(true); - let pool = SqlitePoolOptions::new().max_connections(5).connect_with(options).await.unwrap(); + let pool = SqlitePoolOptions::new().min_connections(1).idle_timeout(None).max_lifetime(None).connect_with(options).await.unwrap(); sqlx::migrate!("../migrations").run(&pool).await.unwrap(); let setup = CompilerTestSetup::from_examples("../../dojo-core", "../../../examples/"); diff --git a/crates/torii/graphql/src/tests/mod.rs b/crates/torii/graphql/src/tests/mod.rs index 0a36bde6ca..427fe91210 100644 --- a/crates/torii/graphql/src/tests/mod.rs +++ b/crates/torii/graphql/src/tests/mod.rs @@ -278,7 +278,7 @@ pub async fn model_fixtures(db: &mut Sql) { pub async fn spinup_types_test() -> Result { // change sqlite::memory: to sqlite:~/.test.db to dump database to disk let options = SqliteConnectOptions::from_str("")?.create_if_missing(true).with_regexp(); - let pool = SqlitePoolOptions::new().max_connections(5).connect_with(options).await.unwrap(); + let pool = SqlitePoolOptions::new().min_connections(1).idle_timeout(None).max_lifetime(None).connect_with(options).await.unwrap(); 
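For context on the pool settings introduced in this patch: SQLite treats an empty filename as a private temporary database that lives and dies with its connection, so a pooled connection that is reaped (or a second one that is opened) no longer sees the schema the migrations created. Keeping one connection alive for the life of the pool is the workaround these hunks reach for. A minimal sketch of that idea, assuming the same sqlx APIs used above; the helper name and the max_connections(1) cap are illustrative, not part of the patch:

use std::str::FromStr;

use anyhow::Result;
use sqlx::sqlite::{SqliteConnectOptions, SqlitePoolOptions};
use sqlx::SqlitePool;

// Sketch: pin the pool to a single, never-recycled connection so the private
// temporary database created for "" stays alive for the whole test.
async fn single_connection_pool() -> Result<SqlitePool> {
    let options = SqliteConnectOptions::from_str("")?.create_if_missing(true);
    let pool = SqlitePoolOptions::new()
        .max_connections(1) // illustrative: a second connection would get its own empty temp DB
        .min_connections(1) // open the one connection up front and keep it
        .idle_timeout(None) // never reap it for being idle
        .max_lifetime(None) // never recycle it
        .connect_with(options)
        .await?;
    sqlx::migrate!("../migrations").run(&pool).await?;
    Ok(pool)
}

The later patches in this series drop the empty connection string in favour of temp files, which avoids the per-connection pitfall altogether.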
sqlx::migrate!("../migrations").run(&pool).await.unwrap(); let setup = CompilerTestSetup::from_paths("../../dojo-core", &["../types-test"]); diff --git a/crates/torii/grpc/src/server/tests/entities_test.rs b/crates/torii/grpc/src/server/tests/entities_test.rs index e999d9df74..78482b74f5 100644 --- a/crates/torii/grpc/src/server/tests/entities_test.rs +++ b/crates/torii/grpc/src/server/tests/entities_test.rs @@ -34,7 +34,7 @@ use crate::types::schema::Entity; #[katana_runner::test(accounts = 10, db_dir = copy_spawn_and_move_db().as_str())] async fn test_entities_queries(sequencer: &RunnerCtx) { let options = SqliteConnectOptions::from_str("").unwrap().create_if_missing(true).with_regexp(); - let pool = SqlitePoolOptions::new().max_connections(5).connect_with(options).await.unwrap(); + let pool = SqlitePoolOptions::new().min_connections(1).idle_timeout(None).max_lifetime(None).connect_with(options).await.unwrap(); sqlx::migrate!("../migrations").run(&pool).await.unwrap(); let setup = CompilerTestSetup::from_examples("../../dojo-core", "../../../examples/"); From 13b1ba71dd99cbe287274e1251ae08efc370dc85 Mon Sep 17 00:00:00 2001 From: Nasr Date: Fri, 27 Sep 2024 12:31:49 -0400 Subject: [PATCH 28/51] fmt --- crates/torii/core/src/sql_test.rs | 24 ++++++++++++++++--- crates/torii/graphql/src/tests/mod.rs | 8 ++++++- .../grpc/src/server/tests/entities_test.rs | 8 ++++++- crates/torii/libp2p/src/tests.rs | 8 ++++++- 4 files changed, 42 insertions(+), 6 deletions(-) diff --git a/crates/torii/core/src/sql_test.rs b/crates/torii/core/src/sql_test.rs index c4b7a6baf4..13f9898ee2 100644 --- a/crates/torii/core/src/sql_test.rs +++ b/crates/torii/core/src/sql_test.rs @@ -70,7 +70,13 @@ where #[katana_runner::test(accounts = 10, db_dir = copy_spawn_and_move_db().as_str())] async fn test_load_from_remote(sequencer: &RunnerCtx) { let options = SqliteConnectOptions::from_str("").unwrap().create_if_missing(true); - let pool = SqlitePoolOptions::new().min_connections(1).idle_timeout(None).max_lifetime(None).connect_with(options).await.unwrap(); + let pool = SqlitePoolOptions::new() + .min_connections(1) + .idle_timeout(None) + .max_lifetime(None) + .connect_with(options) + .await + .unwrap(); sqlx::migrate!("../migrations").run(&pool).await.unwrap(); let setup = CompilerTestSetup::from_examples("../../dojo-core", "../../../examples/"); @@ -219,7 +225,13 @@ async fn test_load_from_remote(sequencer: &RunnerCtx) { #[katana_runner::test(accounts = 10, db_dir = copy_spawn_and_move_db().as_str())] async fn test_load_from_remote_del(sequencer: &RunnerCtx) { let options = SqliteConnectOptions::from_str("").unwrap().create_if_missing(true); - let pool = SqlitePoolOptions::new().min_connections(1).idle_timeout(None).max_lifetime(None).connect_with(options).await.unwrap(); + let pool = SqlitePoolOptions::new() + .min_connections(1) + .idle_timeout(None) + .max_lifetime(None) + .connect_with(options) + .await + .unwrap(); sqlx::migrate!("../migrations").run(&pool).await.unwrap(); let setup = CompilerTestSetup::from_examples("../../dojo-core", "../../../examples/"); @@ -321,7 +333,13 @@ async fn test_load_from_remote_del(sequencer: &RunnerCtx) { #[katana_runner::test(accounts = 10, db_dir = copy_spawn_and_move_db().as_str())] async fn test_update_with_set_record(sequencer: &RunnerCtx) { let options = SqliteConnectOptions::from_str("").unwrap().create_if_missing(true); - let pool = SqlitePoolOptions::new().min_connections(1).idle_timeout(None).max_lifetime(None).connect_with(options).await.unwrap(); + let pool = 
SqlitePoolOptions::new() + .min_connections(1) + .idle_timeout(None) + .max_lifetime(None) + .connect_with(options) + .await + .unwrap(); sqlx::migrate!("../migrations").run(&pool).await.unwrap(); let setup = CompilerTestSetup::from_examples("../../dojo-core", "../../../examples/"); diff --git a/crates/torii/graphql/src/tests/mod.rs b/crates/torii/graphql/src/tests/mod.rs index 427fe91210..858ef514fa 100644 --- a/crates/torii/graphql/src/tests/mod.rs +++ b/crates/torii/graphql/src/tests/mod.rs @@ -278,7 +278,13 @@ pub async fn model_fixtures(db: &mut Sql) { pub async fn spinup_types_test() -> Result { // change sqlite::memory: to sqlite:~/.test.db to dump database to disk let options = SqliteConnectOptions::from_str("")?.create_if_missing(true).with_regexp(); - let pool = SqlitePoolOptions::new().min_connections(1).idle_timeout(None).max_lifetime(None).connect_with(options).await.unwrap(); + let pool = SqlitePoolOptions::new() + .min_connections(1) + .idle_timeout(None) + .max_lifetime(None) + .connect_with(options) + .await + .unwrap(); sqlx::migrate!("../migrations").run(&pool).await.unwrap(); let setup = CompilerTestSetup::from_paths("../../dojo-core", &["../types-test"]); diff --git a/crates/torii/grpc/src/server/tests/entities_test.rs b/crates/torii/grpc/src/server/tests/entities_test.rs index 78482b74f5..c7eb0d94ca 100644 --- a/crates/torii/grpc/src/server/tests/entities_test.rs +++ b/crates/torii/grpc/src/server/tests/entities_test.rs @@ -34,7 +34,13 @@ use crate::types::schema::Entity; #[katana_runner::test(accounts = 10, db_dir = copy_spawn_and_move_db().as_str())] async fn test_entities_queries(sequencer: &RunnerCtx) { let options = SqliteConnectOptions::from_str("").unwrap().create_if_missing(true).with_regexp(); - let pool = SqlitePoolOptions::new().min_connections(1).idle_timeout(None).max_lifetime(None).connect_with(options).await.unwrap(); + let pool = SqlitePoolOptions::new() + .min_connections(1) + .idle_timeout(None) + .max_lifetime(None) + .connect_with(options) + .await + .unwrap(); sqlx::migrate!("../migrations").run(&pool).await.unwrap(); let setup = CompilerTestSetup::from_examples("../../dojo-core", "../../../examples/"); diff --git a/crates/torii/libp2p/src/tests.rs b/crates/torii/libp2p/src/tests.rs index b2cc99720f..9749db0dc0 100644 --- a/crates/torii/libp2p/src/tests.rs +++ b/crates/torii/libp2p/src/tests.rs @@ -552,7 +552,13 @@ mod test { let options = ::from_str("") .unwrap() .create_if_missing(true); - let pool = SqlitePoolOptions::new().min_connections(1).idle_timeout(None).max_lifetime(None).connect_with(options).await.unwrap(); + let pool = SqlitePoolOptions::new() + .min_connections(1) + .idle_timeout(None) + .max_lifetime(None) + .connect_with(options) + .await + .unwrap(); sqlx::migrate!("../migrations").run(&pool).await.unwrap(); let sequencer = KatanaRunner::new().expect("Failed to create Katana sequencer"); From 0c31327dba8f7eefa36a1f2a3afdb802573f19ce Mon Sep 17 00:00:00 2001 From: Nasr Date: Fri, 27 Sep 2024 13:22:34 -0400 Subject: [PATCH 29/51] use tempfile for tests --- Cargo.lock | 2 + crates/torii/core/Cargo.toml | 1 + crates/torii/core/src/sql_test.rs | 46 ++++++++----------- crates/torii/graphql/Cargo.toml | 1 + crates/torii/graphql/src/tests/mod.rs | 6 ++- .../grpc/src/server/tests/entities_test.rs | 10 +--- crates/torii/libp2p/src/tests.rs | 5 +- 7 files changed, 32 insertions(+), 39 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8152a03bd0..5ed1462396 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14872,6 +14872,7 @@ dependencies = 
[ "sqlx", "starknet 0.12.0", "starknet-crypto 0.7.2", + "tempfile", "thiserror", "tokio", "tokio-stream", @@ -14911,6 +14912,7 @@ dependencies = [ "starknet-crypto 0.7.2", "strum 0.25.0", "strum_macros 0.25.3", + "tempfile", "thiserror", "tokio", "tokio-stream", diff --git a/crates/torii/core/Cargo.toml b/crates/torii/core/Cargo.toml index d21c4c06b0..c04b528075 100644 --- a/crates/torii/core/Cargo.toml +++ b/crates/torii/core/Cargo.toml @@ -48,3 +48,4 @@ dojo-test-utils = { path = "../../dojo-test-utils" } dojo-utils.workspace = true katana-runner.workspace = true scarb.workspace = true +tempfile.workspace = true diff --git a/crates/torii/core/src/sql_test.rs b/crates/torii/core/src/sql_test.rs index 13f9898ee2..48e9c56988 100644 --- a/crates/torii/core/src/sql_test.rs +++ b/crates/torii/core/src/sql_test.rs @@ -11,12 +11,14 @@ use dojo_world::contracts::world::{WorldContract, WorldContractReader}; use katana_runner::RunnerCtx; use scarb::compiler::Profile; use sqlx::sqlite::{SqliteConnectOptions, SqlitePoolOptions}; +use sqlx::SqlitePool; use starknet::accounts::Account; use starknet::core::types::{Call, Felt}; use starknet::core::utils::{get_contract_address, get_selector_from_name}; use starknet::providers::jsonrpc::HttpTransport; use starknet::providers::{JsonRpcClient, Provider}; use starknet_crypto::poseidon_hash_many; +use tempfile::NamedTempFile; use tokio::sync::broadcast; use crate::engine::{Engine, EngineConfig, Processors}; @@ -29,6 +31,20 @@ use crate::processors::store_update_member::StoreUpdateMemberProcessor; use crate::processors::store_update_record::StoreUpdateRecordProcessor; use crate::sql::Sql; +pub async fn setup_sqlite_pool() -> Result> { + let tempfile = NamedTempFile::new().unwrap(); + let path = tempfile.path().to_string_lossy(); + let options = SqliteConnectOptions::from_str(&path).unwrap().create_if_missing(true); + let pool = SqlitePoolOptions::new() + .min_connections(1) + .idle_timeout(None) + .max_lifetime(None) + .connect_with(options) + .await?; + sqlx::migrate!("../migrations").run(&pool).await?; + Ok(pool) +} + pub async fn bootstrap_engine

<P>(
    world: WorldContractReader<P>
, db: Sql, @@ -69,15 +85,7 @@ where #[tokio::test(flavor = "multi_thread")] #[katana_runner::test(accounts = 10, db_dir = copy_spawn_and_move_db().as_str())] async fn test_load_from_remote(sequencer: &RunnerCtx) { - let options = SqliteConnectOptions::from_str("").unwrap().create_if_missing(true); - let pool = SqlitePoolOptions::new() - .min_connections(1) - .idle_timeout(None) - .max_lifetime(None) - .connect_with(options) - .await - .unwrap(); - sqlx::migrate!("../migrations").run(&pool).await.unwrap(); + let pool = setup_sqlite_pool().await.unwrap(); let setup = CompilerTestSetup::from_examples("../../dojo-core", "../../../examples/"); let config = setup.build_test_config("spawn-and-move", Profile::DEV); @@ -224,15 +232,7 @@ async fn test_load_from_remote(sequencer: &RunnerCtx) { #[tokio::test(flavor = "multi_thread")] #[katana_runner::test(accounts = 10, db_dir = copy_spawn_and_move_db().as_str())] async fn test_load_from_remote_del(sequencer: &RunnerCtx) { - let options = SqliteConnectOptions::from_str("").unwrap().create_if_missing(true); - let pool = SqlitePoolOptions::new() - .min_connections(1) - .idle_timeout(None) - .max_lifetime(None) - .connect_with(options) - .await - .unwrap(); - sqlx::migrate!("../migrations").run(&pool).await.unwrap(); + let pool = setup_sqlite_pool().await.unwrap(); let setup = CompilerTestSetup::from_examples("../../dojo-core", "../../../examples/"); let config = setup.build_test_config("spawn-and-move", Profile::DEV); @@ -332,15 +332,7 @@ async fn test_load_from_remote_del(sequencer: &RunnerCtx) { #[tokio::test(flavor = "multi_thread")] #[katana_runner::test(accounts = 10, db_dir = copy_spawn_and_move_db().as_str())] async fn test_update_with_set_record(sequencer: &RunnerCtx) { - let options = SqliteConnectOptions::from_str("").unwrap().create_if_missing(true); - let pool = SqlitePoolOptions::new() - .min_connections(1) - .idle_timeout(None) - .max_lifetime(None) - .connect_with(options) - .await - .unwrap(); - sqlx::migrate!("../migrations").run(&pool).await.unwrap(); + let pool = setup_sqlite_pool().await.unwrap(); let setup = CompilerTestSetup::from_examples("../../dojo-core", "../../../examples/"); let config = setup.build_test_config("spawn-and-move", Profile::DEV); diff --git a/crates/torii/graphql/Cargo.toml b/crates/torii/graphql/Cargo.toml index 464dddefb6..c97e177af5 100644 --- a/crates/torii/graphql/Cargo.toml +++ b/crates/torii/graphql/Cargo.toml @@ -48,3 +48,4 @@ scarb.workspace = true serial_test = "2.0.0" starknet-crypto.workspace = true starknet.workspace = true +tempfile.workspace = true diff --git a/crates/torii/graphql/src/tests/mod.rs b/crates/torii/graphql/src/tests/mod.rs index 858ef514fa..c7d96baff5 100644 --- a/crates/torii/graphql/src/tests/mod.rs +++ b/crates/torii/graphql/src/tests/mod.rs @@ -24,6 +24,7 @@ use starknet::core::types::{Call, Felt, InvokeTransactionResult}; use starknet::macros::selector; use starknet::providers::jsonrpc::HttpTransport; use starknet::providers::{JsonRpcClient, Provider}; +use tempfile::NamedTempFile; use tokio::sync::broadcast; use tokio_stream::StreamExt; use torii_core::engine::{Engine, EngineConfig, Processors}; @@ -276,8 +277,9 @@ pub async fn model_fixtures(db: &mut Sql) { } pub async fn spinup_types_test() -> Result { - // change sqlite::memory: to sqlite:~/.test.db to dump database to disk - let options = SqliteConnectOptions::from_str("")?.create_if_missing(true).with_regexp(); + let tempfile = NamedTempFile::new().unwrap(); + let path = tempfile.path().to_string_lossy(); + let options = 
SqliteConnectOptions::from_str(&path).unwrap().create_if_missing(true).with_regexp(); let pool = SqlitePoolOptions::new() .min_connections(1) .idle_timeout(None) diff --git a/crates/torii/grpc/src/server/tests/entities_test.rs b/crates/torii/grpc/src/server/tests/entities_test.rs index c7eb0d94ca..2cf1d2624f 100644 --- a/crates/torii/grpc/src/server/tests/entities_test.rs +++ b/crates/torii/grpc/src/server/tests/entities_test.rs @@ -33,15 +33,7 @@ use crate::types::schema::Entity; #[tokio::test(flavor = "multi_thread")] #[katana_runner::test(accounts = 10, db_dir = copy_spawn_and_move_db().as_str())] async fn test_entities_queries(sequencer: &RunnerCtx) { - let options = SqliteConnectOptions::from_str("").unwrap().create_if_missing(true).with_regexp(); - let pool = SqlitePoolOptions::new() - .min_connections(1) - .idle_timeout(None) - .max_lifetime(None) - .connect_with(options) - .await - .unwrap(); - sqlx::migrate!("../migrations").run(&pool).await.unwrap(); + let pool = setup_sqlite_pool().await.unwrap(); let setup = CompilerTestSetup::from_examples("../../dojo-core", "../../../examples/"); let config = setup.build_test_config("spawn-and-move", Profile::DEV); diff --git a/crates/torii/libp2p/src/tests.rs b/crates/torii/libp2p/src/tests.rs index 9749db0dc0..069f82997b 100644 --- a/crates/torii/libp2p/src/tests.rs +++ b/crates/torii/libp2p/src/tests.rs @@ -534,6 +534,7 @@ mod test { use starknet::providers::JsonRpcClient; use starknet::signers::SigningKey; use starknet_crypto::Felt; + use tempfile::NamedTempFile; use tokio::select; use tokio::sync::broadcast; use tokio::time::sleep; @@ -549,7 +550,9 @@ mod test { .try_init(); // Database - let options = ::from_str("") + let tempfile = NamedTempFile::new().unwrap(); + let path = tempfile.path().to_string_lossy(); + let options = ::from_str(&path) .unwrap() .create_if_missing(true); let pool = SqlitePoolOptions::new() From afa2a0a7fa335ae29514beb54909e81905926d1d Mon Sep 17 00:00:00 2001 From: Nasr Date: Fri, 27 Sep 2024 13:33:32 -0400 Subject: [PATCH 30/51] fix --- Cargo.lock | 1 + crates/torii/graphql/src/tests/mod.rs | 3 ++- crates/torii/grpc/Cargo.toml | 1 + .../torii/grpc/src/server/tests/entities_test.rs | 14 +++++++++++++- 4 files changed, 17 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5ed1462396..1d34aa64a9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14957,6 +14957,7 @@ dependencies = [ "starknet-crypto 0.7.2", "strum 0.25.0", "strum_macros 0.25.3", + "tempfile", "thiserror", "tokio", "tokio-stream", diff --git a/crates/torii/graphql/src/tests/mod.rs b/crates/torii/graphql/src/tests/mod.rs index c7d96baff5..15c05e8bbc 100644 --- a/crates/torii/graphql/src/tests/mod.rs +++ b/crates/torii/graphql/src/tests/mod.rs @@ -279,7 +279,8 @@ pub async fn model_fixtures(db: &mut Sql) { pub async fn spinup_types_test() -> Result { let tempfile = NamedTempFile::new().unwrap(); let path = tempfile.path().to_string_lossy(); - let options = SqliteConnectOptions::from_str(&path).unwrap().create_if_missing(true).with_regexp(); + let options = + SqliteConnectOptions::from_str(&path).unwrap().create_if_missing(true).with_regexp(); let pool = SqlitePoolOptions::new() .min_connections(1) .idle_timeout(None) diff --git a/crates/torii/grpc/Cargo.toml b/crates/torii/grpc/Cargo.toml index c4eb6021e7..08cb2ae45e 100644 --- a/crates/torii/grpc/Cargo.toml +++ b/crates/torii/grpc/Cargo.toml @@ -41,6 +41,7 @@ dojo-utils.workspace = true katana-runner.workspace = true scarb.workspace = true sozo-ops.workspace = true +tempfile.workspace 
= true [target.'cfg(target_arch = "wasm32")'.dependencies] tonic-web-wasm-client.workspace = true diff --git a/crates/torii/grpc/src/server/tests/entities_test.rs b/crates/torii/grpc/src/server/tests/entities_test.rs index 2cf1d2624f..d8b7b759d2 100644 --- a/crates/torii/grpc/src/server/tests/entities_test.rs +++ b/crates/torii/grpc/src/server/tests/entities_test.rs @@ -18,6 +18,7 @@ use starknet::core::utils::{get_contract_address, get_selector_from_name}; use starknet::providers::jsonrpc::HttpTransport; use starknet::providers::{JsonRpcClient, Provider}; use starknet_crypto::poseidon_hash_many; +use tempfile::NamedTempFile; use tokio::sync::broadcast; use torii_core::engine::{Engine, EngineConfig, Processors}; use torii_core::executor::Executor; @@ -33,7 +34,18 @@ use crate::types::schema::Entity; #[tokio::test(flavor = "multi_thread")] #[katana_runner::test(accounts = 10, db_dir = copy_spawn_and_move_db().as_str())] async fn test_entities_queries(sequencer: &RunnerCtx) { - let pool = setup_sqlite_pool().await.unwrap(); + let tempfile = NamedTempFile::new().unwrap(); + let path = tempfile.path().to_string_lossy(); + let options = + SqliteConnectOptions::from_str(&path).unwrap().create_if_missing(true).with_regexp(); + let pool = SqlitePoolOptions::new() + .min_connections(1) + .idle_timeout(None) + .max_lifetime(None) + .connect_with(options) + .await + .unwrap(); + sqlx::migrate!("../migrations").run(&pool).await.unwrap(); let setup = CompilerTestSetup::from_examples("../../dojo-core", "../../../examples/"); let config = setup.build_test_config("spawn-and-move", Profile::DEV); From ef9fafc84b9a428a843ea2361a9a315ec574f452 Mon Sep 17 00:00:00 2001 From: Nasr Date: Fri, 27 Sep 2024 13:55:05 -0400 Subject: [PATCH 31/51] c --- crates/torii/core/src/sql_test.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crates/torii/core/src/sql_test.rs b/crates/torii/core/src/sql_test.rs index 48e9c56988..f37f46b225 100644 --- a/crates/torii/core/src/sql_test.rs +++ b/crates/torii/core/src/sql_test.rs @@ -158,7 +158,7 @@ async fn test_load_from_remote(sequencer: &RunnerCtx) { executor.run().await.unwrap(); }); - let db = Sql::new(pool.clone(), world_reader.address, sender.clone()).await.unwrap(); + let db = Sql::new(pool.clone(), world_reader.address, sender).await.unwrap(); let _ = bootstrap_engine(world_reader, db.clone(), provider).await.unwrap(); @@ -317,7 +317,7 @@ async fn test_load_from_remote_del(sequencer: &RunnerCtx) { executor.run().await.unwrap(); }); - let db = Sql::new(pool.clone(), world_reader.address, sender.clone()).await.unwrap(); + let db = Sql::new(pool.clone(), world_reader.address, sender).await.unwrap(); let _ = bootstrap_engine(world_reader, db.clone(), provider).await; @@ -405,7 +405,7 @@ async fn test_update_with_set_record(sequencer: &RunnerCtx) { executor.run().await.unwrap(); }); - let db = Sql::new(pool.clone(), world_reader.address, sender.clone()).await.unwrap(); + let db = Sql::new(pool.clone(), world_reader.address, sender).await.unwrap(); let _ = bootstrap_engine(world_reader, db.clone(), Arc::clone(&provider)).await.unwrap(); } From 4ec379c2ccd4ccada943c59c02a79518d77b08b7 Mon Sep 17 00:00:00 2001 From: Nasr Date: Fri, 27 Sep 2024 14:39:58 -0400 Subject: [PATCH 32/51] fix: sql tests --- crates/torii/core/src/sql_test.rs | 67 +++++++++++-------------------- 1 file changed, 24 insertions(+), 43 deletions(-) diff --git a/crates/torii/core/src/sql_test.rs b/crates/torii/core/src/sql_test.rs index f37f46b225..1f3372ad73 100644 --- 
a/crates/torii/core/src/sql_test.rs +++ b/crates/torii/core/src/sql_test.rs @@ -11,7 +11,6 @@ use dojo_world::contracts::world::{WorldContract, WorldContractReader}; use katana_runner::RunnerCtx; use scarb::compiler::Profile; use sqlx::sqlite::{SqliteConnectOptions, SqlitePoolOptions}; -use sqlx::SqlitePool; use starknet::accounts::Account; use starknet::core::types::{Call, Felt}; use starknet::core::utils::{get_contract_address, get_selector_from_name}; @@ -31,7 +30,7 @@ use crate::processors::store_update_member::StoreUpdateMemberProcessor; use crate::processors::store_update_record::StoreUpdateRecordProcessor; use crate::sql::Sql; -pub async fn setup_sqlite_pool() -> Result> { +pub async fn setup_sqlite_pool(world_address: Felt) -> Result> { let tempfile = NamedTempFile::new().unwrap(); let path = tempfile.path().to_string_lossy(); let options = SqliteConnectOptions::from_str(&path).unwrap().create_if_missing(true); @@ -42,7 +41,16 @@ pub async fn setup_sqlite_pool() -> Result( @@ -85,8 +93,6 @@ where #[tokio::test(flavor = "multi_thread")] #[katana_runner::test(accounts = 10, db_dir = copy_spawn_and_move_db().as_str())] async fn test_load_from_remote(sequencer: &RunnerCtx) { - let pool = setup_sqlite_pool().await.unwrap(); - let setup = CompilerTestSetup::from_examples("../../dojo-core", "../../../examples/"); let config = setup.build_test_config("spawn-and-move", Profile::DEV); @@ -151,19 +157,12 @@ async fn test_load_from_remote(sequencer: &RunnerCtx) { TransactionWaiter::new(tx.transaction_hash, &provider).await.unwrap(); let world_reader = WorldContractReader::new(strat.world_address, Arc::clone(&provider)); - - let (shutdown_tx, _) = broadcast::channel(1); - let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); - tokio::spawn(async move { - executor.run().await.unwrap(); - }); - - let db = Sql::new(pool.clone(), world_reader.address, sender).await.unwrap(); + let db = setup_sqlite_pool(world_reader.address).await.unwrap(); let _ = bootstrap_engine(world_reader, db.clone(), provider).await.unwrap(); let _block_timestamp = 1710754478_u64; - let models = sqlx::query("SELECT * FROM models").fetch_all(&pool).await.unwrap(); + let models = sqlx::query("SELECT * FROM models").fetch_all(&db.pool).await.unwrap(); assert_eq!(models.len(), 10); let (id, name, namespace, packed_size, unpacked_size): (String, String, String, u8, u8) = @@ -171,7 +170,7 @@ async fn test_load_from_remote(sequencer: &RunnerCtx) { "SELECT id, name, namespace, packed_size, unpacked_size FROM models WHERE name = \ 'Position'", ) - .fetch_one(&pool) + .fetch_one(&db.pool) .await .unwrap(); @@ -186,7 +185,7 @@ async fn test_load_from_remote(sequencer: &RunnerCtx) { "SELECT id, name, namespace, packed_size, unpacked_size FROM models WHERE name = \ 'Moves'", ) - .fetch_one(&pool) + .fetch_one(&db.pool) .await .unwrap(); @@ -201,7 +200,7 @@ async fn test_load_from_remote(sequencer: &RunnerCtx) { "SELECT id, name, namespace, packed_size, unpacked_size FROM models WHERE name = \ 'PlayerConfig'", ) - .fetch_one(&pool) + .fetch_one(&db.pool) .await .unwrap(); @@ -211,8 +210,8 @@ async fn test_load_from_remote(sequencer: &RunnerCtx) { assert_eq!(packed_size, 0); assert_eq!(unpacked_size, 0); - assert_eq!(count_table("entities", &pool).await, 2); - assert_eq!(count_table("event_messages", &pool).await, 1); + assert_eq!(count_table("entities", &db.pool).await, 2); + assert_eq!(count_table("event_messages", &db.pool).await, 1); let (id, keys): (String, String) = sqlx::query_as( format!( @@ 
-221,7 +220,7 @@ async fn test_load_from_remote(sequencer: &RunnerCtx) { ) .as_str(), ) - .fetch_one(&pool) + .fetch_one(&db.pool) .await .unwrap(); @@ -232,8 +231,6 @@ async fn test_load_from_remote(sequencer: &RunnerCtx) { #[tokio::test(flavor = "multi_thread")] #[katana_runner::test(accounts = 10, db_dir = copy_spawn_and_move_db().as_str())] async fn test_load_from_remote_del(sequencer: &RunnerCtx) { - let pool = setup_sqlite_pool().await.unwrap(); - let setup = CompilerTestSetup::from_examples("../../dojo-core", "../../../examples/"); let config = setup.build_test_config("spawn-and-move", Profile::DEV); @@ -310,20 +307,13 @@ async fn test_load_from_remote_del(sequencer: &RunnerCtx) { TransactionWaiter::new(res.transaction_hash, &provider).await.unwrap(); let world_reader = WorldContractReader::new(strat.world_address, Arc::clone(&provider)); - - let (shutdown_tx, _) = broadcast::channel(1); - let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); - tokio::spawn(async move { - executor.run().await.unwrap(); - }); - - let db = Sql::new(pool.clone(), world_reader.address, sender).await.unwrap(); + let db = setup_sqlite_pool(world_reader.address).await.unwrap(); let _ = bootstrap_engine(world_reader, db.clone(), provider).await; - assert_eq!(count_table("dojo_examples-PlayerConfig", &pool).await, 0); - assert_eq!(count_table("dojo_examples-PlayerConfig$favorite_item", &pool).await, 0); - assert_eq!(count_table("dojo_examples-PlayerConfig$items", &pool).await, 0); + assert_eq!(count_table("dojo_examples-PlayerConfig", &db.pool).await, 0); + assert_eq!(count_table("dojo_examples-PlayerConfig$favorite_item", &db.pool).await, 0); + assert_eq!(count_table("dojo_examples-PlayerConfig$items", &db.pool).await, 0); // TODO: check how we can have a test that is more chronological with Torii re-syncing // to ensure we can test intermediate states. 
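The setup repeated across these test patches, a temp-file SQLite database plus a pool that never drops its connections, can be collected into one helper. This is a sketch built from the same crates used in the hunks above (tempfile, sqlx); the function name is illustrative, and unlike the patch it hands the NamedTempFile back to the caller so the file is not deleted while the pool still needs it:

use std::str::FromStr;

use anyhow::Result;
use sqlx::sqlite::{SqliteConnectOptions, SqlitePoolOptions};
use sqlx::SqlitePool;
use tempfile::NamedTempFile;

// Sketch: a file-backed test database; dropping the NamedTempFile removes the
// file, so the caller must keep it alive for as long as the pool is in use.
async fn temp_file_pool() -> Result<(NamedTempFile, SqlitePool)> {
    let file = NamedTempFile::new()?;
    let path = file.path().to_string_lossy().into_owned();
    let options = SqliteConnectOptions::from_str(&path)?.create_if_missing(true).with_regexp();
    let pool = SqlitePoolOptions::new()
        .min_connections(1)
        .idle_timeout(None)
        .max_lifetime(None)
        .connect_with(options)
        .await?;
    sqlx::migrate!("../migrations").run(&pool).await?;
    Ok((file, pool))
}

Each test is then left with spawning its Executor and constructing Sql from the returned pool, as the surrounding hunks do.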
@@ -332,8 +322,6 @@ async fn test_load_from_remote_del(sequencer: &RunnerCtx) { #[tokio::test(flavor = "multi_thread")] #[katana_runner::test(accounts = 10, db_dir = copy_spawn_and_move_db().as_str())] async fn test_update_with_set_record(sequencer: &RunnerCtx) { - let pool = setup_sqlite_pool().await.unwrap(); - let setup = CompilerTestSetup::from_examples("../../dojo-core", "../../../examples/"); let config = setup.build_test_config("spawn-and-move", Profile::DEV); @@ -398,14 +386,7 @@ async fn test_update_with_set_record(sequencer: &RunnerCtx) { TransactionWaiter::new(move_res.transaction_hash, &provider).await.unwrap(); let world_reader = WorldContractReader::new(strat.world_address, Arc::clone(&provider)); - - let (shutdown_tx, _) = broadcast::channel(1); - let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); - tokio::spawn(async move { - executor.run().await.unwrap(); - }); - - let db = Sql::new(pool.clone(), world_reader.address, sender).await.unwrap(); + let db = setup_sqlite_pool(world_reader.address).await.unwrap(); let _ = bootstrap_engine(world_reader, db.clone(), Arc::clone(&provider)).await.unwrap(); } From d393896407f6f403792b6587988d9de865ed233f Mon Sep 17 00:00:00 2001 From: Nasr Date: Fri, 27 Sep 2024 14:53:32 -0400 Subject: [PATCH 33/51] clone --- crates/torii/core/src/sql_test.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/torii/core/src/sql_test.rs b/crates/torii/core/src/sql_test.rs index 1f3372ad73..073342684d 100644 --- a/crates/torii/core/src/sql_test.rs +++ b/crates/torii/core/src/sql_test.rs @@ -48,7 +48,7 @@ pub async fn setup_sqlite_pool(world_address: Felt) -> Result Date: Fri, 27 Sep 2024 14:54:19 -0400 Subject: [PATCH 34/51] fmt --- crates/torii/core/src/sql_test.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/torii/core/src/sql_test.rs b/crates/torii/core/src/sql_test.rs index 073342684d..e263e3991d 100644 --- a/crates/torii/core/src/sql_test.rs +++ b/crates/torii/core/src/sql_test.rs @@ -41,7 +41,7 @@ pub async fn setup_sqlite_pool(world_address: Felt) -> Result Date: Fri, 27 Sep 2024 15:11:24 -0400 Subject: [PATCH 35/51] fmt --- crates/torii/core/src/sql_test.rs | 40 +++++++++++++++++++++---------- 1 file changed, 27 insertions(+), 13 deletions(-) diff --git a/crates/torii/core/src/sql_test.rs b/crates/torii/core/src/sql_test.rs index e263e3991d..4748e85401 100644 --- a/crates/torii/core/src/sql_test.rs +++ b/crates/torii/core/src/sql_test.rs @@ -11,6 +11,7 @@ use dojo_world::contracts::world::{WorldContract, WorldContractReader}; use katana_runner::RunnerCtx; use scarb::compiler::Profile; use sqlx::sqlite::{SqliteConnectOptions, SqlitePoolOptions}; +use sqlx::SqlitePool; use starknet::accounts::Account; use starknet::core::types::{Call, Felt}; use starknet::core::utils::{get_contract_address, get_selector_from_name}; @@ -30,7 +31,7 @@ use crate::processors::store_update_member::StoreUpdateMemberProcessor; use crate::processors::store_update_record::StoreUpdateRecordProcessor; use crate::sql::Sql; -pub async fn setup_sqlite_pool(world_address: Felt) -> Result> { +pub async fn setup_sqlite_pool() -> Result> { let tempfile = NamedTempFile::new().unwrap(); let path = tempfile.path().to_string_lossy(); let options = SqliteConnectOptions::from_str(&path).unwrap().create_if_missing(true); @@ -42,15 +43,7 @@ pub async fn setup_sqlite_pool(world_address: Felt) -> Result( @@ -157,7 +150,14 @@ async fn test_load_from_remote(sequencer: &RunnerCtx) { 
TransactionWaiter::new(tx.transaction_hash, &provider).await.unwrap(); let world_reader = WorldContractReader::new(strat.world_address, Arc::clone(&provider)); - let db = setup_sqlite_pool(world_reader.address).await.unwrap(); + let pool = setup_sqlite_pool().await.unwrap(); + let (shutdown_tx, _) = broadcast::channel(1); + let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); + tokio::spawn(async move { + executor.run().await.unwrap(); + }); + + let db = Sql::new(pool.clone(), world_reader.address, sender.clone()).await.unwrap(); let _ = bootstrap_engine(world_reader, db.clone(), provider).await.unwrap(); @@ -307,7 +307,14 @@ async fn test_load_from_remote_del(sequencer: &RunnerCtx) { TransactionWaiter::new(res.transaction_hash, &provider).await.unwrap(); let world_reader = WorldContractReader::new(strat.world_address, Arc::clone(&provider)); - let db = setup_sqlite_pool(world_reader.address).await.unwrap(); + let pool = setup_sqlite_pool().await.unwrap(); + let (shutdown_tx, _) = broadcast::channel(1); + let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); + tokio::spawn(async move { + executor.run().await.unwrap(); + }); + + let db = Sql::new(pool.clone(), world_reader.address, sender.clone()).await.unwrap(); let _ = bootstrap_engine(world_reader, db.clone(), provider).await; @@ -386,7 +393,14 @@ async fn test_update_with_set_record(sequencer: &RunnerCtx) { TransactionWaiter::new(move_res.transaction_hash, &provider).await.unwrap(); let world_reader = WorldContractReader::new(strat.world_address, Arc::clone(&provider)); - let db = setup_sqlite_pool(world_reader.address).await.unwrap(); + let pool = setup_sqlite_pool().await.unwrap(); + let (shutdown_tx, _) = broadcast::channel(1); + let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); + tokio::spawn(async move { + executor.run().await.unwrap(); + }); + + let db = Sql::new(pool.clone(), world_reader.address, sender.clone()).await.unwrap(); let _ = bootstrap_engine(world_reader, db.clone(), Arc::clone(&provider)).await.unwrap(); } From 7758cf9cb0efc86bb6984d4437c5d6d4345683e1 Mon Sep 17 00:00:00 2001 From: Nasr Date: Fri, 27 Sep 2024 15:25:24 -0400 Subject: [PATCH 36/51] no temp file --- crates/torii/core/src/sql_test.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/crates/torii/core/src/sql_test.rs b/crates/torii/core/src/sql_test.rs index 4748e85401..80b91ba437 100644 --- a/crates/torii/core/src/sql_test.rs +++ b/crates/torii/core/src/sql_test.rs @@ -18,7 +18,6 @@ use starknet::core::utils::{get_contract_address, get_selector_from_name}; use starknet::providers::jsonrpc::HttpTransport; use starknet::providers::{JsonRpcClient, Provider}; use starknet_crypto::poseidon_hash_many; -use tempfile::NamedTempFile; use tokio::sync::broadcast; use crate::engine::{Engine, EngineConfig, Processors}; @@ -32,9 +31,7 @@ use crate::processors::store_update_record::StoreUpdateRecordProcessor; use crate::sql::Sql; pub async fn setup_sqlite_pool() -> Result> { - let tempfile = NamedTempFile::new().unwrap(); - let path = tempfile.path().to_string_lossy(); - let options = SqliteConnectOptions::from_str(&path).unwrap().create_if_missing(true); + let options = SqliteConnectOptions::from_str("").unwrap().create_if_missing(true); let pool = SqlitePoolOptions::new() .min_connections(1) .idle_timeout(None) From 607cd0605ea1045e1e5906db4eb6f4cde4d2c83c Mon Sep 17 00:00:00 2001 From: Nasr Date: Mon, 30 Sep 2024 11:30:34 
-0400 Subject: [PATCH 37/51] tmp file --- crates/torii/core/src/sql_test.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/crates/torii/core/src/sql_test.rs b/crates/torii/core/src/sql_test.rs index 80b91ba437..4748e85401 100644 --- a/crates/torii/core/src/sql_test.rs +++ b/crates/torii/core/src/sql_test.rs @@ -18,6 +18,7 @@ use starknet::core::utils::{get_contract_address, get_selector_from_name}; use starknet::providers::jsonrpc::HttpTransport; use starknet::providers::{JsonRpcClient, Provider}; use starknet_crypto::poseidon_hash_many; +use tempfile::NamedTempFile; use tokio::sync::broadcast; use crate::engine::{Engine, EngineConfig, Processors}; @@ -31,7 +32,9 @@ use crate::processors::store_update_record::StoreUpdateRecordProcessor; use crate::sql::Sql; pub async fn setup_sqlite_pool() -> Result> { - let options = SqliteConnectOptions::from_str("").unwrap().create_if_missing(true); + let tempfile = NamedTempFile::new().unwrap(); + let path = tempfile.path().to_string_lossy(); + let options = SqliteConnectOptions::from_str(&path).unwrap().create_if_missing(true); let pool = SqlitePoolOptions::new() .min_connections(1) .idle_timeout(None) From d48dd30c6c349d2c7d8cd6d60c884508cc263e54 Mon Sep 17 00:00:00 2001 From: Nasr Date: Mon, 30 Sep 2024 12:07:46 -0400 Subject: [PATCH 38/51] fix: lock issues --- crates/torii/core/src/sql.rs | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/crates/torii/core/src/sql.rs b/crates/torii/core/src/sql.rs index 7bee0d10ac..64899fae59 100644 --- a/crates/torii/core/src/sql.rs +++ b/crates/torii/core/src/sql.rs @@ -42,22 +42,26 @@ impl Sql { world_address: Felt, executor: UnboundedSender, ) -> Result { - sqlx::query( + executor.send(QueryMessage::other( "INSERT OR IGNORE INTO contracts (id, contract_address, contract_type) VALUES (?, ?, \ - ?)", - ) - .bind(format!("{:#x}", world_address)) - .bind(format!("{:#x}", world_address)) - .bind(WORLD_CONTRACT_TYPE) - .execute(&pool) - .await?; - - Ok(Self { + ?)" + .to_string(), + vec![ + Argument::FieldElement(world_address), + Argument::FieldElement(world_address), + Argument::String(WORLD_CONTRACT_TYPE.to_string()), + ], + ))?; + + let db = Self { pool: pool.clone(), world_address, executor, model_cache: Arc::new(ModelCache::new(pool)), - }) + }; + db.execute().await?; + + Ok(db) } pub async fn head(&self) -> Result<(u64, Option, Option)> { From 6d4b99fbb73c6ad65e5994718e436fffe03f9e93 Mon Sep 17 00:00:00 2001 From: Nasr Date: Mon, 30 Sep 2024 13:17:13 -0400 Subject: [PATCH 39/51] manuallyt use tmp file --- crates/torii/core/src/executor.rs | 2 +- crates/torii/core/src/sql_test.rs | 61 ++++++++++++++++--------------- 2 files changed, 32 insertions(+), 31 deletions(-) diff --git a/crates/torii/core/src/executor.rs b/crates/torii/core/src/executor.rs index 4abb381327..a9dee8bdf2 100644 --- a/crates/torii/core/src/executor.rs +++ b/crates/torii/core/src/executor.rs @@ -140,7 +140,7 @@ impl<'c> Executor<'c> { loop { tokio::select! 
{ _ = self.shutdown_rx.recv() => { - println!("Shutting down executor"); + debug!(target: LOG_TARGET, "Shutting down executor"); break Ok(()); } Some(msg) = self.rx.recv() => { diff --git a/crates/torii/core/src/sql_test.rs b/crates/torii/core/src/sql_test.rs index 4748e85401..68399add0c 100644 --- a/crates/torii/core/src/sql_test.rs +++ b/crates/torii/core/src/sql_test.rs @@ -11,14 +11,12 @@ use dojo_world::contracts::world::{WorldContract, WorldContractReader}; use katana_runner::RunnerCtx; use scarb::compiler::Profile; use sqlx::sqlite::{SqliteConnectOptions, SqlitePoolOptions}; -use sqlx::SqlitePool; use starknet::accounts::Account; use starknet::core::types::{Call, Felt}; use starknet::core::utils::{get_contract_address, get_selector_from_name}; use starknet::providers::jsonrpc::HttpTransport; use starknet::providers::{JsonRpcClient, Provider}; use starknet_crypto::poseidon_hash_many; -use tempfile::NamedTempFile; use tokio::sync::broadcast; use crate::engine::{Engine, EngineConfig, Processors}; @@ -31,21 +29,6 @@ use crate::processors::store_update_member::StoreUpdateMemberProcessor; use crate::processors::store_update_record::StoreUpdateRecordProcessor; use crate::sql::Sql; -pub async fn setup_sqlite_pool() -> Result> { - let tempfile = NamedTempFile::new().unwrap(); - let path = tempfile.path().to_string_lossy(); - let options = SqliteConnectOptions::from_str(&path).unwrap().create_if_missing(true); - let pool = SqlitePoolOptions::new() - .min_connections(1) - .idle_timeout(None) - .max_lifetime(None) - .connect_with(options) - .await?; - sqlx::migrate!("../migrations").run(&pool).await?; - - Ok(pool) -} - pub async fn bootstrap_engine

<P>(
    world: WorldContractReader<P>
, db: Sql, @@ -150,7 +133,13 @@ async fn test_load_from_remote(sequencer: &RunnerCtx) { TransactionWaiter::new(tx.transaction_hash, &provider).await.unwrap(); let world_reader = WorldContractReader::new(strat.world_address, Arc::clone(&provider)); - let pool = setup_sqlite_pool().await.unwrap(); + + let options = SqliteConnectOptions::from_str("/tmp/test_load_from_remote.db") + .unwrap() + .create_if_missing(true); + let pool = SqlitePoolOptions::new().connect_with(options).await.unwrap(); + sqlx::migrate!("../migrations").run(&pool).await.unwrap(); + let (shutdown_tx, _) = broadcast::channel(1); let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); tokio::spawn(async move { @@ -162,7 +151,7 @@ async fn test_load_from_remote(sequencer: &RunnerCtx) { let _ = bootstrap_engine(world_reader, db.clone(), provider).await.unwrap(); let _block_timestamp = 1710754478_u64; - let models = sqlx::query("SELECT * FROM models").fetch_all(&db.pool).await.unwrap(); + let models = sqlx::query("SELECT * FROM models").fetch_all(&pool).await.unwrap(); assert_eq!(models.len(), 10); let (id, name, namespace, packed_size, unpacked_size): (String, String, String, u8, u8) = @@ -170,7 +159,7 @@ async fn test_load_from_remote(sequencer: &RunnerCtx) { "SELECT id, name, namespace, packed_size, unpacked_size FROM models WHERE name = \ 'Position'", ) - .fetch_one(&db.pool) + .fetch_one(&pool) .await .unwrap(); @@ -185,7 +174,7 @@ async fn test_load_from_remote(sequencer: &RunnerCtx) { "SELECT id, name, namespace, packed_size, unpacked_size FROM models WHERE name = \ 'Moves'", ) - .fetch_one(&db.pool) + .fetch_one(&pool) .await .unwrap(); @@ -200,7 +189,7 @@ async fn test_load_from_remote(sequencer: &RunnerCtx) { "SELECT id, name, namespace, packed_size, unpacked_size FROM models WHERE name = \ 'PlayerConfig'", ) - .fetch_one(&db.pool) + .fetch_one(&pool) .await .unwrap(); @@ -210,8 +199,8 @@ async fn test_load_from_remote(sequencer: &RunnerCtx) { assert_eq!(packed_size, 0); assert_eq!(unpacked_size, 0); - assert_eq!(count_table("entities", &db.pool).await, 2); - assert_eq!(count_table("event_messages", &db.pool).await, 1); + assert_eq!(count_table("entities", &pool).await, 2); + assert_eq!(count_table("event_messages", &pool).await, 2); let (id, keys): (String, String) = sqlx::query_as( format!( @@ -220,7 +209,7 @@ async fn test_load_from_remote(sequencer: &RunnerCtx) { ) .as_str(), ) - .fetch_one(&db.pool) + .fetch_one(&pool) .await .unwrap(); @@ -307,7 +296,13 @@ async fn test_load_from_remote_del(sequencer: &RunnerCtx) { TransactionWaiter::new(res.transaction_hash, &provider).await.unwrap(); let world_reader = WorldContractReader::new(strat.world_address, Arc::clone(&provider)); - let pool = setup_sqlite_pool().await.unwrap(); + + let options = SqliteConnectOptions::from_str("/tmp/test_load_from_remote_del.db") + .unwrap() + .create_if_missing(true); + let pool = SqlitePoolOptions::new().connect_with(options).await.unwrap(); + sqlx::migrate!("../migrations").run(&pool).await.unwrap(); + let (shutdown_tx, _) = broadcast::channel(1); let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); tokio::spawn(async move { @@ -318,9 +313,9 @@ async fn test_load_from_remote_del(sequencer: &RunnerCtx) { let _ = bootstrap_engine(world_reader, db.clone(), provider).await; - assert_eq!(count_table("dojo_examples-PlayerConfig", &db.pool).await, 0); - assert_eq!(count_table("dojo_examples-PlayerConfig$favorite_item", &db.pool).await, 0); - 
assert_eq!(count_table("dojo_examples-PlayerConfig$items", &db.pool).await, 0); + assert_eq!(count_table("dojo_examples-PlayerConfig", &pool).await, 0); + assert_eq!(count_table("dojo_examples-PlayerConfig$favorite_item", &pool).await, 0); + assert_eq!(count_table("dojo_examples-PlayerConfig$items", &pool).await, 0); // TODO: check how we can have a test that is more chronological with Torii re-syncing // to ensure we can test intermediate states. @@ -393,7 +388,13 @@ async fn test_update_with_set_record(sequencer: &RunnerCtx) { TransactionWaiter::new(move_res.transaction_hash, &provider).await.unwrap(); let world_reader = WorldContractReader::new(strat.world_address, Arc::clone(&provider)); - let pool = setup_sqlite_pool().await.unwrap(); + + let options = SqliteConnectOptions::from_str("/tmp/test_update_with_set_record.db") + .unwrap() + .create_if_missing(true); + let pool = SqlitePoolOptions::new().connect_with(options).await.unwrap(); + sqlx::migrate!("../migrations").run(&pool).await.unwrap(); + let (shutdown_tx, _) = broadcast::channel(1); let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx.clone()).await.unwrap(); tokio::spawn(async move { From baf7f35c8afd6f52c78f328828d120c21a01ae23 Mon Sep 17 00:00:00 2001 From: Nasr Date: Mon, 30 Sep 2024 13:33:30 -0400 Subject: [PATCH 40/51] fix graphql tests --- crates/torii/graphql/src/tests/entities_test.rs | 5 ++++- crates/torii/graphql/src/tests/mod.rs | 13 ++----------- .../torii/graphql/src/tests/models_ordering_test.rs | 5 ++++- crates/torii/graphql/src/tests/models_test.rs | 5 ++++- 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/crates/torii/graphql/src/tests/entities_test.rs b/crates/torii/graphql/src/tests/entities_test.rs index 6138aac846..efd74ab723 100644 --- a/crates/torii/graphql/src/tests/entities_test.rs +++ b/crates/torii/graphql/src/tests/entities_test.rs @@ -5,6 +5,7 @@ mod tests { use serde_json::Value; use starknet::core::types::Felt; use starknet_crypto::poseidon_hash_many; + use tempfile::NamedTempFile; use crate::schema::build_schema; use crate::tests::{ @@ -90,7 +91,9 @@ mod tests { // to run so combine all related tests into one #[tokio::test(flavor = "multi_thread")] async fn entities_test() -> Result<()> { - let pool = spinup_types_test().await?; + let tempfile = NamedTempFile::new().unwrap(); + let path = tempfile.path().to_string_lossy(); + let pool = spinup_types_test(&path).await?; let schema = build_schema(&pool).await.unwrap(); // default without params diff --git a/crates/torii/graphql/src/tests/mod.rs b/crates/torii/graphql/src/tests/mod.rs index 15c05e8bbc..0c75789ed6 100644 --- a/crates/torii/graphql/src/tests/mod.rs +++ b/crates/torii/graphql/src/tests/mod.rs @@ -24,7 +24,6 @@ use starknet::core::types::{Call, Felt, InvokeTransactionResult}; use starknet::macros::selector; use starknet::providers::jsonrpc::HttpTransport; use starknet::providers::{JsonRpcClient, Provider}; -use tempfile::NamedTempFile; use tokio::sync::broadcast; use tokio_stream::StreamExt; use torii_core::engine::{Engine, EngineConfig, Processors}; @@ -276,18 +275,10 @@ pub async fn model_fixtures(db: &mut Sql) { db.execute().await.unwrap(); } -pub async fn spinup_types_test() -> Result { - let tempfile = NamedTempFile::new().unwrap(); - let path = tempfile.path().to_string_lossy(); +pub async fn spinup_types_test(path: &str) -> Result { let options = SqliteConnectOptions::from_str(&path).unwrap().create_if_missing(true).with_regexp(); - let pool = SqlitePoolOptions::new() - .min_connections(1) - 
.idle_timeout(None) - .max_lifetime(None) - .connect_with(options) - .await - .unwrap(); + let pool = SqlitePoolOptions::new().connect_with(options).await.unwrap(); sqlx::migrate!("../migrations").run(&pool).await.unwrap(); let setup = CompilerTestSetup::from_paths("../../dojo-core", &["../types-test"]); diff --git a/crates/torii/graphql/src/tests/models_ordering_test.rs b/crates/torii/graphql/src/tests/models_ordering_test.rs index 9b4abdf26e..42182182fa 100644 --- a/crates/torii/graphql/src/tests/models_ordering_test.rs +++ b/crates/torii/graphql/src/tests/models_ordering_test.rs @@ -3,6 +3,7 @@ mod tests { use anyhow::Result; use async_graphql::dynamic::Schema; use serde_json::Value; + use tempfile::NamedTempFile; use crate::schema::build_schema; use crate::tests::{run_graphql_query, spinup_types_test, Connection, WorldModel}; @@ -44,7 +45,9 @@ mod tests { // to run so combine all related tests into one #[tokio::test(flavor = "multi_thread")] async fn models_ordering_test() -> Result<()> { - let pool = spinup_types_test().await?; + let tempfile = NamedTempFile::new().unwrap(); + let path = tempfile.path().to_string_lossy(); + let pool = spinup_types_test(&path).await?; let schema = build_schema(&pool).await.unwrap(); // default params, test entity relationship, test nested types diff --git a/crates/torii/graphql/src/tests/models_test.rs b/crates/torii/graphql/src/tests/models_test.rs index 163d9afc41..78cd6f5458 100644 --- a/crates/torii/graphql/src/tests/models_test.rs +++ b/crates/torii/graphql/src/tests/models_test.rs @@ -6,6 +6,7 @@ mod tests { use async_graphql::dynamic::Schema; use serde_json::Value; use starknet::core::types::Felt; + use tempfile::NamedTempFile; use crate::schema::build_schema; use crate::tests::{ @@ -166,7 +167,9 @@ mod tests { #[allow(clippy::get_first)] #[tokio::test(flavor = "multi_thread")] async fn models_test() -> Result<()> { - let pool = spinup_types_test().await?; + let tempfile = NamedTempFile::new().unwrap(); + let path = tempfile.path().to_string_lossy(); + let pool = spinup_types_test(&path).await?; let schema = build_schema(&pool).await.unwrap(); // we need to order all the records because insertions are done in parallel From c4f288a6aeec1e617615916cf2617cc487536509 Mon Sep 17 00:00:00 2001 From: Nasr Date: Mon, 30 Sep 2024 13:39:03 -0400 Subject: [PATCH 41/51] fix: tests --- crates/torii/core/src/sql_test.rs | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/crates/torii/core/src/sql_test.rs b/crates/torii/core/src/sql_test.rs index 68399add0c..499fd0adf8 100644 --- a/crates/torii/core/src/sql_test.rs +++ b/crates/torii/core/src/sql_test.rs @@ -17,6 +17,7 @@ use starknet::core::utils::{get_contract_address, get_selector_from_name}; use starknet::providers::jsonrpc::HttpTransport; use starknet::providers::{JsonRpcClient, Provider}; use starknet_crypto::poseidon_hash_many; +use tempfile::NamedTempFile; use tokio::sync::broadcast; use crate::engine::{Engine, EngineConfig, Processors}; @@ -134,9 +135,9 @@ async fn test_load_from_remote(sequencer: &RunnerCtx) { let world_reader = WorldContractReader::new(strat.world_address, Arc::clone(&provider)); - let options = SqliteConnectOptions::from_str("/tmp/test_load_from_remote.db") - .unwrap() - .create_if_missing(true); + let tempfile = NamedTempFile::new().unwrap(); + let path = tempfile.path().to_string_lossy(); + let options = SqliteConnectOptions::from_str(&path).unwrap().create_if_missing(true); let pool = 
SqlitePoolOptions::new().connect_with(options).await.unwrap(); sqlx::migrate!("../migrations").run(&pool).await.unwrap(); @@ -297,9 +298,9 @@ async fn test_load_from_remote_del(sequencer: &RunnerCtx) { let world_reader = WorldContractReader::new(strat.world_address, Arc::clone(&provider)); - let options = SqliteConnectOptions::from_str("/tmp/test_load_from_remote_del.db") - .unwrap() - .create_if_missing(true); + let tempfile = NamedTempFile::new().unwrap(); + let path = tempfile.path().to_string_lossy(); + let options = SqliteConnectOptions::from_str(&path).unwrap().create_if_missing(true); let pool = SqlitePoolOptions::new().connect_with(options).await.unwrap(); sqlx::migrate!("../migrations").run(&pool).await.unwrap(); @@ -389,9 +390,9 @@ async fn test_update_with_set_record(sequencer: &RunnerCtx) { let world_reader = WorldContractReader::new(strat.world_address, Arc::clone(&provider)); - let options = SqliteConnectOptions::from_str("/tmp/test_update_with_set_record.db") - .unwrap() - .create_if_missing(true); + let tempfile = NamedTempFile::new().unwrap(); + let path = tempfile.path().to_string_lossy(); + let options = SqliteConnectOptions::from_str(&path).unwrap().create_if_missing(true); let pool = SqlitePoolOptions::new().connect_with(options).await.unwrap(); sqlx::migrate!("../migrations").run(&pool).await.unwrap(); From 5dac2205aed4c0c088236ea43b9634f0cec5ebfd Mon Sep 17 00:00:00 2001 From: Nasr Date: Mon, 30 Sep 2024 13:45:41 -0400 Subject: [PATCH 42/51] clippy --- crates/torii/graphql/src/tests/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/torii/graphql/src/tests/mod.rs b/crates/torii/graphql/src/tests/mod.rs index 0c75789ed6..7fe949881c 100644 --- a/crates/torii/graphql/src/tests/mod.rs +++ b/crates/torii/graphql/src/tests/mod.rs @@ -277,7 +277,7 @@ pub async fn model_fixtures(db: &mut Sql) { pub async fn spinup_types_test(path: &str) -> Result { let options = - SqliteConnectOptions::from_str(&path).unwrap().create_if_missing(true).with_regexp(); + SqliteConnectOptions::from_str(path).unwrap().create_if_missing(true).with_regexp(); let pool = SqlitePoolOptions::new().connect_with(options).await.unwrap(); sqlx::migrate!("../migrations").run(&pool).await.unwrap(); From 28633b448030e51a1b183ae1491f96b8f361380b Mon Sep 17 00:00:00 2001 From: Nasr Date: Mon, 30 Sep 2024 16:01:35 -0400 Subject: [PATCH 43/51] fix torii bin --- bin/torii/src/main.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/bin/torii/src/main.rs b/bin/torii/src/main.rs index 48bfd6dee4..00cf4b4ce5 100644 --- a/bin/torii/src/main.rs +++ b/bin/torii/src/main.rs @@ -187,6 +187,10 @@ async fn main() -> anyhow::Result<()> { let world = WorldContractReader::new(args.world_address, provider.clone()); let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx.clone()).await?; + tokio::spawn(async move { + executor.run().await.unwrap(); + }); + let db = Sql::new(pool.clone(), args.world_address, sender.clone()).await?; let processors = Processors { @@ -291,7 +295,6 @@ async fn main() -> anyhow::Result<()> { tokio::select! 
{ res = engine.start() => res?, - _ = executor.run() => {}, _ = proxy_server.start(shutdown_tx.subscribe()) => {}, _ = graphql_server => {}, _ = grpc_server => {}, From fd3c3778c7584aa76398cd8772f4bfa1ba0b73a5 Mon Sep 17 00:00:00 2001 From: Nasr Date: Mon, 30 Sep 2024 17:02:00 -0400 Subject: [PATCH 44/51] engine executions --- crates/torii/core/src/engine.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/crates/torii/core/src/engine.rs b/crates/torii/core/src/engine.rs index ef56ad12de..112a42051e 100644 --- a/crates/torii/core/src/engine.rs +++ b/crates/torii/core/src/engine.rs @@ -448,6 +448,9 @@ impl Engine

{ } } + // Directly commit non entity related queries + self.db.execute().await?; + // Process parallelized events self.process_tasks().await?; @@ -502,6 +505,9 @@ impl Engine

{ } } + // Directly commit non entity related queries + self.db.execute().await?; + // Process parallelized events self.process_tasks().await?; @@ -529,7 +535,7 @@ impl Engine

{ let semaphore = semaphore.clone(); handles.push(tokio::spawn(async move { - let _permit = semaphore.acquire().await.unwrap(); + let _permit = semaphore.acquire().await?; let mut local_db = db.clone(); for ParallelizedEvent { event_id, event, block_number, block_timestamp } in events { if let Some(processor) = processors.event.get(&event.keys[0]) { @@ -543,6 +549,8 @@ impl Engine

{ } } } + + local_db.execute().await?; Ok::<_, anyhow::Error>(local_db) })); } From 4cabea5ea9b5c5b615ea5a0c2768b6230ef8afd9 Mon Sep 17 00:00:00 2001 From: Nasr Date: Mon, 30 Sep 2024 17:26:08 -0400 Subject: [PATCH 45/51] use tmp file for db --- Cargo.lock | 1 + bin/torii/Cargo.toml | 1 + bin/torii/src/main.rs | 14 +++++++++++++- 3 files changed, 15 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 1d34aa64a9..79d9ff6aa3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14791,6 +14791,7 @@ dependencies = [ "sqlx", "starknet 0.12.0", "starknet-crypto 0.7.2", + "tempfile", "tokio", "tokio-stream", "tokio-util", diff --git a/bin/torii/Cargo.toml b/bin/torii/Cargo.toml index 977764b26d..aadbd390cd 100644 --- a/bin/torii/Cargo.toml +++ b/bin/torii/Cargo.toml @@ -46,6 +46,7 @@ tracing-subscriber.workspace = true tracing.workspace = true url.workspace = true webbrowser = "0.8" +tempfile.workspace = true [dev-dependencies] camino.workspace = true diff --git a/bin/torii/src/main.rs b/bin/torii/src/main.rs index 00cf4b4ce5..30c368f047 100644 --- a/bin/torii/src/main.rs +++ b/bin/torii/src/main.rs @@ -27,6 +27,7 @@ use sqlx::SqlitePool; use starknet::core::types::Felt; use starknet::providers::jsonrpc::HttpTransport; use starknet::providers::JsonRpcClient; +use tempfile::NamedTempFile; use tokio::sync::broadcast; use tokio::sync::broadcast::Sender; use tokio_stream::StreamExt; @@ -164,8 +165,19 @@ async fn main() -> anyhow::Result<()> { }) .expect("Error setting Ctrl-C handler"); + let mut tempfile = None; + if args.database.is_empty() { + tempfile = Some(NamedTempFile::new()?); + } + + let database_path = if let Some(tempfile) = &tempfile { + tempfile.path().to_str().unwrap() + } else { + &args.database + }; + let mut options = - SqliteConnectOptions::from_str(&args.database)?.create_if_missing(true).with_regexp(); + SqliteConnectOptions::from_str(database_path)?.create_if_missing(true).with_regexp(); // Performance settings options = options.auto_vacuum(SqliteAutoVacuum::None); From ee860421bd8f5403847afe38311206be3519e012 Mon Sep 17 00:00:00 2001 From: Nasr Date: Mon, 30 Sep 2024 18:34:57 -0400 Subject: [PATCH 46/51] fix: cursor --- crates/torii/core/src/engine.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/crates/torii/core/src/engine.rs b/crates/torii/core/src/engine.rs index 112a42051e..ff9f1b20a6 100644 --- a/crates/torii/core/src/engine.rs +++ b/crates/torii/core/src/engine.rs @@ -157,7 +157,7 @@ impl Engine

{ pub async fn start(&mut self) -> Result<()> { // use the start block provided by user if head is 0 - let (mut head, mut last_pending_block_tx, mut last_pending_block_world_tx) = + let (mut head, mut last_pending_block_world_tx, mut last_pending_block_tx) = self.db.head().await?; if head == 0 { self.db.set_head(self.config.start_block)?; @@ -191,8 +191,8 @@ impl Engine

{ self.db.executor.send(QueryMessage::execute())?; if let Some(new_head) = res { head = new_head.block_number; - last_pending_block_tx = new_head.last_pending_block_tx; last_pending_block_world_tx = new_head.last_pending_block_world_tx; + last_pending_block_tx = new_head.last_pending_block_tx; } } Err(e) => { From 43246b6f43773bd888953e4cbf6b430a971f6959 Mon Sep 17 00:00:00 2001 From: Nasr Date: Mon, 30 Sep 2024 19:24:13 -0400 Subject: [PATCH 47/51] chore --- crates/torii/core/src/engine.rs | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/crates/torii/core/src/engine.rs b/crates/torii/core/src/engine.rs index ff9f1b20a6..106c7f7861 100644 --- a/crates/torii/core/src/engine.rs +++ b/crates/torii/core/src/engine.rs @@ -21,7 +21,6 @@ use tokio::sync::Semaphore; use tokio::time::{sleep, Instant}; use tracing::{debug, error, info, trace, warn}; -use crate::executor::QueryMessage; use crate::processors::event_message::EventMessageProcessor; use crate::processors::{BlockProcessor, EventProcessor, TransactionProcessor}; use crate::sql::Sql; @@ -112,8 +111,8 @@ pub struct ParallelizedEvent { #[derive(Debug)] pub struct EngineHead { pub block_number: u64, - pub last_pending_block_tx: Option, pub last_pending_block_world_tx: Option, + pub last_pending_block_tx: Option, } #[allow(missing_debug_implementations)] @@ -157,8 +156,7 @@ impl Engine

{ pub async fn start(&mut self) -> Result<()> { // use the start block provided by user if head is 0 - let (mut head, mut last_pending_block_world_tx, mut last_pending_block_tx) = - self.db.head().await?; + let (head, _, _) = self.db.head().await?; if head == 0 { self.db.set_head(self.config.start_block)?; } else if self.config.start_block != 0 { @@ -172,6 +170,8 @@ impl Engine

{ let mut erroring_out = false; loop { + let (head, last_pending_block_world_tx, last_pending_block_tx) = self.db.head().await?; + tokio::select! { _ = shutdown_rx.recv() => { break Ok(()); @@ -187,14 +187,7 @@ impl Engine

{ } match self.process(fetch_result).await { - Ok(res) => { - self.db.executor.send(QueryMessage::execute())?; - if let Some(new_head) = res { - head = new_head.block_number; - last_pending_block_world_tx = new_head.last_pending_block_world_tx; - last_pending_block_tx = new_head.last_pending_block_tx; - } - } + Ok(_) => self.db.execute().await?, Err(e) => { error!(target: LOG_TARGET, error = %e, "Processing fetched data."); erroring_out = true; @@ -468,8 +461,8 @@ impl Engine

{ Ok(EngineHead { block_number: data.block_number - 1, - last_pending_block_tx, last_pending_block_world_tx, + last_pending_block_tx, }) } From 706c7fb70117ac9d47e349035d202956550a89d2 Mon Sep 17 00:00:00 2001 From: Nasr Date: Mon, 30 Sep 2024 20:12:16 -0400 Subject: [PATCH 48/51] wip --- crates/torii/core/src/engine.rs | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/crates/torii/core/src/engine.rs b/crates/torii/core/src/engine.rs index 106c7f7861..010f7d7385 100644 --- a/crates/torii/core/src/engine.rs +++ b/crates/torii/core/src/engine.rs @@ -441,9 +441,6 @@ impl Engine

{ } } - // Directly commit non entity related queries - self.db.execute().await?; - // Process parallelized events self.process_tasks().await?; @@ -498,9 +495,6 @@ impl Engine

{ } } - // Directly commit non entity related queries - self.db.execute().await?; - // Process parallelized events self.process_tasks().await?; @@ -543,8 +537,7 @@ impl Engine

{ } } - local_db.execute().await?; - Ok::<_, anyhow::Error>(local_db) + Ok::<_, anyhow::Error>(()) })); } From 6b6f5a60922fd5e97d1c3b2f4c80121227ceb069 Mon Sep 17 00:00:00 2001 From: Nasr Date: Wed, 2 Oct 2024 15:45:14 -0400 Subject: [PATCH 49/51] cleaning code --- bin/torii/src/main.rs | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/bin/torii/src/main.rs b/bin/torii/src/main.rs index 30c368f047..3e7931cf8f 100644 --- a/bin/torii/src/main.rs +++ b/bin/torii/src/main.rs @@ -165,16 +165,9 @@ async fn main() -> anyhow::Result<()> { }) .expect("Error setting Ctrl-C handler"); - let mut tempfile = None; - if args.database.is_empty() { - tempfile = Some(NamedTempFile::new()?); - } - - let database_path = if let Some(tempfile) = &tempfile { - tempfile.path().to_str().unwrap() - } else { - &args.database - }; + let tempfile = NamedTempFile::new()?; + let database_path = + if args.database.is_empty() { tempfile.path().to_str().unwrap() } else { &args.database }; let mut options = SqliteConnectOptions::from_str(database_path)?.create_if_missing(true).with_regexp(); From 61f0a4b05ede498dbbdfd66c47810841086fd979 Mon Sep 17 00:00:00 2001 From: Nasr Date: Wed, 2 Oct 2024 15:51:35 -0400 Subject: [PATCH 50/51] refactor: handle errors without panic --- crates/torii/core/src/sql.rs | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/crates/torii/core/src/sql.rs b/crates/torii/core/src/sql.rs index 64899fae59..c42e447080 100644 --- a/crates/torii/core/src/sql.rs +++ b/crates/torii/core/src/sql.rs @@ -74,20 +74,29 @@ impl Sql { let indexer: (Option, Option, Option, String) = indexer_query.fetch_one(&self.pool).await?; + Ok(( - indexer.0.map(|h| h.try_into().expect("doesn't fit in u64")).unwrap_or(0), + indexer + .0 + .map(|h| h.try_into().map_err(|_| anyhow!("Head value {} doesn't fit in u64", h))) + .transpose()? + .unwrap_or(0), indexer.1.map(|f| Felt::from_str(&f)).transpose()?, indexer.2.map(|f| Felt::from_str(&f)).transpose()?, )) } pub fn set_head(&mut self, head: u64) -> Result<()> { - let head = Argument::Int(head.try_into().expect("doesn't fit in u64")); + let head = Argument::Int( + head.try_into().map_err(|_| anyhow!("Head value {} doesn't fit in i64", head))?, + ); let id = Argument::FieldElement(self.world_address); - self.executor.send(QueryMessage::other( - "UPDATE contracts SET head = ? WHERE id = ?".to_string(), - vec![head, id], - ))?; + self.executor + .send(QueryMessage::other( + "UPDATE contracts SET head = ? 
WHERE id = ?".to_string(), + vec![head, id], + )) + .map_err(|e| anyhow!("Failed to send set_head message: {}", e))?; Ok(()) } From 63cca75a0e638f5489d488336568f62646ab53b1 Mon Sep 17 00:00:00 2001 From: Nasr Date: Wed, 2 Oct 2024 16:01:46 -0400 Subject: [PATCH 51/51] use vec --- crates/torii/core/src/executor.rs | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/crates/torii/core/src/executor.rs b/crates/torii/core/src/executor.rs index a9dee8bdf2..503759e43f 100644 --- a/crates/torii/core/src/executor.rs +++ b/crates/torii/core/src/executor.rs @@ -1,4 +1,3 @@ -use std::collections::VecDeque; use std::mem; use anyhow::{Context, Result}; @@ -61,7 +60,7 @@ pub enum QueryType { pub struct Executor<'c> { pool: Pool, transaction: Transaction<'c, Sqlite>, - publish_queue: VecDeque, + publish_queue: Vec, rx: UnboundedReceiver, shutdown_rx: Receiver<()>, } @@ -130,7 +129,7 @@ impl<'c> Executor<'c> { ) -> Result<(Self, UnboundedSender)> { let (tx, rx) = unbounded_channel(); let transaction = pool.begin().await?; - let publish_queue = VecDeque::new(); + let publish_queue = Vec::new(); let shutdown_rx = shutdown_tx.subscribe(); Ok((Executor { pool, transaction, publish_queue, rx, shutdown_rx }, tx)) @@ -187,7 +186,7 @@ impl<'c> Executor<'c> { entity_updated.updated_model = Some(entity); entity_updated.deleted = false; let broker_message = BrokerMessage::EntityUpdated(entity_updated); - self.publish_queue.push_back(broker_message); + self.publish_queue.push(broker_message); } QueryType::DeleteEntity(entity) => { let delete_model = query.execute(&mut **tx).await.with_context(|| { @@ -227,14 +226,14 @@ impl<'c> Executor<'c> { } let broker_message = BrokerMessage::EntityUpdated(entity_updated); - self.publish_queue.push_back(broker_message); + self.publish_queue.push(broker_message); } QueryType::RegisterModel => { let row = query.fetch_one(&mut **tx).await.with_context(|| { format!("Failed to execute query: {:?}, args: {:?}", statement, arguments) })?; let model_registered = ModelRegistered::from_row(&row)?; - self.publish_queue.push_back(BrokerMessage::ModelRegistered(model_registered)); + self.publish_queue.push(BrokerMessage::ModelRegistered(model_registered)); } QueryType::EventMessage(entity) => { let row = query.fetch_one(&mut **tx).await.with_context(|| { @@ -243,14 +242,14 @@ impl<'c> Executor<'c> { let mut event_message = EventMessageUpdated::from_row(&row)?; event_message.updated_model = Some(entity); let broker_message = BrokerMessage::EventMessageUpdated(event_message); - self.publish_queue.push_back(broker_message); + self.publish_queue.push(broker_message); } QueryType::StoreEvent => { let row = query.fetch_one(&mut **tx).await.with_context(|| { format!("Failed to execute query: {:?}, args: {:?}", statement, arguments) })?; let event = EventEmitted::from_row(&row)?; - self.publish_queue.push_back(BrokerMessage::EventEmitted(event)); + self.publish_queue.push(BrokerMessage::EventEmitted(event)); } QueryType::Execute => { debug!(target: LOG_TARGET, "Executing query."); @@ -280,7 +279,7 @@ impl<'c> Executor<'c> { let transaction = mem::replace(&mut self.transaction, self.pool.begin().await?); transaction.commit().await?; - while let Some(message) = self.publish_queue.pop_front() { + for message in self.publish_queue.drain(..) { send_broker_message(message); }
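
Taken together, the executor patches above settle on one shape: Sql and the engine push QueryMessage values over an unbounded channel, a single spawned Executor task applies them inside one long-lived sqlx transaction, broker notifications are buffered in a plain Vec, and that buffer is only drained once the batch is committed (the explicit execute message). The stand-alone sketch below illustrates that shape only; it is a minimal model under assumptions, not code from this series: the Message enum, the println! "publishing", and the tokio features (macros, rt-multi-thread, sync) stand in for torii's real QueryMessage, BrokerMessage, and sqlx plumbing.

use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};

// Placeholder for torii's QueryMessage / QueryType: either work to run inside
// the open transaction, or an explicit "commit and publish" signal.
#[derive(Debug)]
enum Message {
    Query(String),
    Execute,
}

struct Executor {
    rx: UnboundedReceiver<Message>,
    publish_queue: Vec<String>, // buffered notifications, drained at commit
}

impl Executor {
    fn new() -> (Self, UnboundedSender<Message>) {
        let (tx, rx) = unbounded_channel();
        (Self { rx, publish_queue: Vec::new() }, tx)
    }

    async fn run(&mut self) {
        while let Some(msg) = self.rx.recv().await {
            match msg {
                // The real executor runs the statement against its transaction
                // here; this sketch only records the would-be notification.
                Message::Query(stmt) => self.publish_queue.push(stmt),
                // Commit point: flush everything buffered for this batch, in order.
                Message::Execute => {
                    for m in self.publish_queue.drain(..) {
                        println!("publish: {m}");
                    }
                }
            }
        }
    }
}

#[tokio::main]
async fn main() {
    let (mut executor, tx) = Executor::new();
    let handle = tokio::spawn(async move { executor.run().await });

    tx.send(Message::Query("set entity A".into())).unwrap();
    tx.send(Message::Query("set entity B".into())).unwrap();
    tx.send(Message::Execute).unwrap();

    drop(tx); // closing the channel lets run() return
    handle.await.unwrap();
}

Deferring publication until the commit point is the property the later patches rely on: subscribers never observe a notification for work that the still-open transaction could roll back, which is why the engine only asks the executor to flush once a fetched batch has been fully processed.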