diff --git a/crates/torii/core/src/model.rs b/crates/torii/core/src/model.rs index 4abd5c923a..bf7d70de0d 100644 --- a/crates/torii/core/src/model.rs +++ b/crates/torii/core/src/model.rs @@ -120,8 +120,10 @@ impl ModelReader for ModelSQLReader { pub fn build_sql_query( schemas: &Vec, table_name: &str, + model_relation_table: &str, entity_relation_column: &str, where_clause: Option<&str>, + having_clause: Option<&str>, order_by: Option<&str>, limit: Option, offset: Option, @@ -172,6 +174,7 @@ pub fn build_sql_query( // Add base table columns selections.push(format!("{}.id", table_name)); selections.push(format!("{}.keys", table_name)); + selections.push(format!("group_concat({model_relation_table}.model_id) as model_ids")); // Process each model schema for model in schemas { @@ -185,19 +188,37 @@ pub fn build_sql_query( collect_columns(&model_table, "", model, &mut selections); } + joins.push(format!( + "JOIN {model_relation_table} ON {table_name}.id = {model_relation_table}.entity_id" + )); + let selections_clause = selections.join(", "); let joins_clause = joins.join(" "); let mut query = format!("SELECT {} FROM [{}] {}", selections_clause, table_name, joins_clause); - let mut count_query = - format!("SELECT COUNT(DISTINCT {}.id) FROM [{}] {}", table_name, table_name, joins_clause); + // Include model_ids in the subquery and put WHERE before GROUP BY + let mut count_query = format!( + "SELECT COUNT(*) FROM (SELECT {}.id, group_concat({}.model_id) as model_ids FROM [{}] {}", + table_name, model_relation_table, table_name, joins_clause + ); if let Some(where_clause) = where_clause { query += &format!(" WHERE {}", where_clause); count_query += &format!(" WHERE {}", where_clause); } + query += &format!(" GROUP BY {table_name}.id"); + count_query += &format!(" GROUP BY {table_name}.id"); + + if let Some(having_clause) = having_clause { + query += &format!(" HAVING {}", having_clause); + count_query += &format!(" HAVING {}", having_clause); + } + + // Close the subquery + count_query += ") AS filtered_entities"; + // Use custom order by if provided, otherwise default to event_id DESC if let Some(order_clause) = order_by { query += &format!(" ORDER BY {}", order_clause); @@ -490,25 +511,28 @@ mod tests { let query = build_sql_query( &vec![position, player_config], "entities", + "entity_model", "internal_entity_id", None, None, None, None, + None, ) .unwrap(); let expected_query = - "SELECT entities.id, entities.keys, [Test-Position].[player] as \ - \"Test-Position.player\", [Test-Position].[vec.x] as \"Test-Position.vec.x\", \ - [Test-Position].[vec.y] as \"Test-Position.vec.y\", \ + "SELECT entities.id, entities.keys, group_concat(entity_model.model_id) as model_ids, \ + [Test-Position].[player] as \"Test-Position.player\", [Test-Position].[vec.x] as \ + \"Test-Position.vec.x\", [Test-Position].[vec.y] as \"Test-Position.vec.y\", \ [Test-Position].[test_everything] as \"Test-Position.test_everything\", \ [Test-PlayerConfig].[favorite_item] as \"Test-PlayerConfig.favorite_item\", \ [Test-PlayerConfig].[favorite_item.Some] as \ \"Test-PlayerConfig.favorite_item.Some\", [Test-PlayerConfig].[items] as \ \"Test-PlayerConfig.items\" FROM [entities] LEFT JOIN [Test-Position] ON entities.id \ = [Test-Position].internal_entity_id LEFT JOIN [Test-PlayerConfig] ON entities.id = \ - [Test-PlayerConfig].internal_entity_id ORDER BY entities.event_id DESC"; + [Test-PlayerConfig].internal_entity_id JOIN entity_model ON entities.id = \ + entity_model.entity_id GROUP BY entities.id ORDER BY entities.event_id 
DESC"; assert_eq!(query.0, expected_query); } } diff --git a/crates/torii/core/src/sql/cache.rs b/crates/torii/core/src/sql/cache.rs index 3d72bd093e..db7fcd5d55 100644 --- a/crates/torii/core/src/sql/cache.rs +++ b/crates/torii/core/src/sql/cache.rs @@ -41,6 +41,10 @@ impl ModelCache { } pub async fn models(&self, selectors: &[Felt]) -> Result, Error> { + if selectors.is_empty() { + return Ok(self.model_cache.read().await.values().cloned().collect()); + } + let mut schemas = Vec::with_capacity(selectors.len()); for selector in selectors { schemas.push(self.model(selector).await?); diff --git a/crates/torii/grpc/src/server/mod.rs b/crates/torii/grpc/src/server/mod.rs index 8e884a08ce..b0e7d4ac45 100644 --- a/crates/torii/grpc/src/server/mod.rs +++ b/crates/torii/grpc/src/server/mod.rs @@ -261,147 +261,19 @@ impl DojoWorld { row_events.iter().map(map_row_to_event).collect() } - #[allow(clippy::too_many_arguments)] - async fn fetch_entities( - &self, - table: &str, - entity_relation_column: &str, - entities: Vec<(String, String)>, - dont_include_hashed_keys: bool, - order_by: Option<&str>, - entity_models: Vec, - ) -> Result, Error> { - let entity_models = - entity_models.iter().map(|tag| compute_selector_from_tag(tag)).collect::>(); - - tracing::debug!( - "Fetching entities from table {table} with {} entity/model pairs", - entities.len() - ); - let start = std::time::Instant::now(); - - // Group entities by their model combinations - let mut model_groups: HashMap> = HashMap::new(); - for (entity_id, models_str) in entities { - model_groups.entry(models_str).or_default().push(entity_id); - } - tracing::debug!("Grouped into {} distinct model combinations", model_groups.len()); - - let mut all_entities = Vec::new(); - - let mut tx = self.pool.begin().await?; - tracing::debug!("Started database transaction"); - - // Create a temporary table to store entity IDs due to them potentially exceeding - // SQLite's parameters limit which is 999 - let temp_table_start = std::time::Instant::now(); - sqlx::query( - "CREATE TEMPORARY TABLE temp_entity_ids (id TEXT PRIMARY KEY, model_group TEXT)", - ) - .execute(&mut *tx) - .await?; - tracing::debug!("Created temporary table in {:?}", temp_table_start.elapsed()); - - // Insert all entity IDs into the temporary table - let insert_start = std::time::Instant::now(); - for (model_ids, entity_ids) in &model_groups { - for chunk in entity_ids.chunks(999) { - let placeholders = chunk.iter().map(|_| "(?, ?)").collect::>().join(","); - let query = format!( - "INSERT INTO temp_entity_ids (id, model_group) VALUES {}", - placeholders - ); - let mut query = sqlx::query(&query); - for id in chunk { - query = query.bind(id).bind(model_ids); - } - query.execute(&mut *tx).await?; - } - } - tracing::debug!( - "Inserted all entity IDs into temporary table in {:?}", - insert_start.elapsed() - ); - - let query_start = std::time::Instant::now(); - for (models_str, entity_ids) in &model_groups { - tracing::debug!("Processing model group with {} entities", entity_ids.len()); - let model_ids = models_str - .split(',') - .filter_map(|id| { - let model_id = Felt::from_str(id).unwrap(); - if entity_models.is_empty() || entity_models.contains(&model_id) { - Some(model_id) - } else { - None - } - }) - .collect::>(); - let schemas = self - .model_cache - .models(&model_ids) - .await? 
- .into_iter() - .map(|m| m.schema) - .collect::>(); - if schemas.is_empty() { - continue; - } - - let (entity_query, _) = build_sql_query( - &schemas, - table, - entity_relation_column, - Some(&format!( - "[{table}].id IN (SELECT id FROM temp_entity_ids WHERE model_group = ?)" - )), - order_by, - None, - None, - )?; - - let query = sqlx::query(&entity_query).bind(models_str); - let rows = query.fetch_all(&mut *tx).await?; - - let schemas = Arc::new(schemas); - - let group_entities: Result, Error> = rows - .par_iter() - .map(|row| map_row_to_entity(row, &schemas, dont_include_hashed_keys)) - .collect(); - - all_entities.extend(group_entities?); - } - tracing::debug!("Processed all model groups in {:?}", query_start.elapsed()); - - sqlx::query("DROP TABLE temp_entity_ids").execute(&mut *tx).await?; - tracing::debug!("Dropped temporary table"); - - tx.commit().await?; - tracing::debug!("Committed transaction"); - - tracing::debug!("Total fetch_entities operation took {:?}", start.elapsed()); - - Ok(all_entities) - } - async fn fetch_historical_event_messages( &self, query: &str, - keys_pattern: Option<&str>, + bind_values: Vec, limit: Option, offset: Option, ) -> Result, Error> { - let db_entities: Vec<(String, String, String, String)> = if keys_pattern.is_some() { - sqlx::query_as(query) - .bind(keys_pattern.unwrap()) - .bind(limit) - .bind(offset) - .fetch_all(&self.pool) - .await? - } else { - sqlx::query_as(query).bind(limit).bind(offset).fetch_all(&self.pool).await? - }; + let mut query = sqlx::query_as(query); + for value in bind_values { + query = query.bind(value); + } + let db_entities: Vec<(String, String, String, String)> = + query.bind(limit).bind(offset).fetch_all(&self.pool).await?; let mut entities = HashMap::new(); for (id, data, model_id, _) in db_entities { @@ -463,96 +335,80 @@ impl DojoWorld { } } }; - - // count query that matches filter_ids - let count_query = format!( - r#" - SELECT count(*) - FROM {table} - {where_clause} - "# - ); - - // total count of rows without limit and offset - let mut count_query = sqlx::query_scalar(&count_query); - if let Some(hashed_keys) = &hashed_keys { - for key in &hashed_keys.hashed_keys { - let key = Felt::from_bytes_be_slice(key); - count_query = count_query.bind(format!("{:#x}", key)); - } + let mut bind_values = vec![]; + if let Some(hashed_keys) = hashed_keys { + bind_values = hashed_keys + .hashed_keys + .iter() + .map(|key| format!("{:#x}", Felt::from_bytes_be_slice(key))) + .collect::>() } - if let Some(entity_updated_after) = entity_updated_after.clone() { - count_query = count_query.bind(entity_updated_after); - } - let total_count = count_query.fetch_optional(&self.pool).await?.unwrap_or(0); - if total_count == 0 { - return Ok((Vec::new(), 0)); + bind_values.push(entity_updated_after); } - // Query to get entity IDs and their model IDs - let mut query = if table == EVENT_MESSAGES_HISTORICAL_TABLE { - format!( - r#" - SELECT {table}.id, {table}.data, {table}.model_id, group_concat({model_relation_table}.model_id) as model_ids - FROM {table} - JOIN {model_relation_table} ON {table}.id = {model_relation_table}.entity_id - {where_clause} - GROUP BY {table}.event_id - ORDER BY {table}.event_id DESC - "# - ) - } else { - format!( - r#" - SELECT {table}.id, group_concat({model_relation_table}.model_id) as model_ids - FROM {table} - JOIN {model_relation_table} ON {table}.id = {model_relation_table}.entity_id - {where_clause} - GROUP BY {table}.id - ORDER BY {table}.event_id DESC - "# - ) - }; + let entity_models = + 
entity_models.iter().map(|model| compute_selector_from_tag(model)).collect::>(); + let schemas = self + .model_cache + .models(&entity_models) + .await? + .iter() + .map(|m| m.schema.clone()) + .collect::>(); - if limit.is_some() { - query += " LIMIT ?" - } + let having_clause = entity_models + .iter() + .map(|model| format!("INSTR(model_ids, '{:#x}') > 0", model)) + .collect::>() + .join(" OR "); - if offset.is_some() { - query += " OFFSET ?" + let (query, count_query) = build_sql_query( + &schemas, + table, + model_relation_table, + entity_relation_column, + if where_clause.is_empty() { None } else { Some(&where_clause) }, + if !having_clause.is_empty() { Some(&having_clause) } else { None }, + order_by, + limit, + offset, + )?; + + let mut count_query = sqlx::query_scalar(&count_query); + for value in &bind_values { + count_query = count_query.bind(value); + } + let total_count = count_query.fetch_one(&self.pool).await?; + if total_count == 0 { + return Ok((Vec::new(), 0)); } if table == EVENT_MESSAGES_HISTORICAL_TABLE { let entities = - self.fetch_historical_event_messages(&query, None, limit, offset).await?; + self.fetch_historical_event_messages(&format!( + r#" + SELECT {table}.id, {table}.data, {table}.model_id, group_concat({model_relation_table}.model_id) as model_ids + FROM {table} + JOIN {model_relation_table} ON {table}.id = {model_relation_table}.entity_id + {where_clause} + GROUP BY {table}.event_id + ORDER BY {table}.event_id DESC + "# + ), bind_values, limit, offset).await?; return Ok((entities, total_count)); } - let mut query = sqlx::query_as(&query); - if let Some(hashed_keys) = hashed_keys { - for key in hashed_keys.hashed_keys { - let key = Felt::from_bytes_be_slice(&key); - query = query.bind(format!("{:#x}", key)); - } - } - - if let Some(entity_updated_after) = entity_updated_after.clone() { - query = query.bind(entity_updated_after); + let mut query = sqlx::query(&query); + for value in &bind_values { + query = query.bind(value); } - query = query.bind(limit).bind(offset); - let db_entities: Vec<(String, String)> = query.fetch_all(&self.pool).await?; + let entities = query.fetch_all(&self.pool).await?; + let entities = entities + .iter() + .map(|row| map_row_to_entity(row, &schemas, dont_include_hashed_keys)) + .collect::, Error>>()?; - let entities = self - .fetch_entities( - table, - entity_relation_column, - db_entities, - dont_include_hashed_keys, - order_by, - entity_models, - ) - .await?; Ok((entities, total_count)) } @@ -572,158 +428,85 @@ impl DojoWorld { ) -> Result<(Vec, u32), Error> { let keys_pattern = build_keys_pattern(keys_clause)?; - // total count of rows that matches keys_pattern without limit and offset - let count_query = format!( - r#" - SELECT count(*) - FROM {table} - {} - "#, - if !keys_clause.models.is_empty() { - // split the model names to namespace and model - let model_ids = keys_clause - .models - .iter() - .map(|model| { - model - .split_once('-') - .ok_or(QueryError::InvalidNamespacedModel(model.clone())) - }) - .collect::, _>>()?; - // get the model selector from namespace and model and format - let model_ids_str = model_ids - .iter() - .map(|(namespace, model)| { - format!("'{:#x}'", compute_selector_from_names(namespace, model)) - }) - .collect::>() - .join(","); - format!( - r#" - JOIN {model_relation_table} ON {table}.id = {model_relation_table}.entity_id - WHERE {model_relation_table}.model_id IN ({}) - AND {table}.keys REGEXP ? 
- {} - "#, - model_ids_str, - if entity_updated_after.is_some() { - format!("AND {table}.updated_at >= ?") - } else { - String::new() - } - ) + let where_clause = format!( + "{table}.keys REGEXP ? {}", + if entity_updated_after.is_some() { + format!("AND {table}.updated_at >= ?") } else { - format!( - r#" - WHERE {table}.keys REGEXP ? - {} - "#, - if entity_updated_after.is_some() { - format!("AND {table}.updated_at >= ?") - } else { - String::new() - } - ) + String::new() } ); - let total_count = sqlx::query_scalar(&count_query) - .bind(&keys_pattern) - .bind(entity_updated_after.clone()) - .fetch_optional(&self.pool) - .await? - .unwrap_or(0); - if total_count == 0 { - return Ok((Vec::new(), 0)); + let mut bind_values = vec![keys_pattern]; + if let Some(entity_updated_after) = entity_updated_after.clone() { + bind_values.push(entity_updated_after); } - let mut models_query = if table == EVENT_MESSAGES_HISTORICAL_TABLE { - format!( - r#" - SELECT {table}.id, {table}.data, {table}.model_id, group_concat({model_relation_table}.model_id) as model_ids - FROM {table} - JOIN {model_relation_table} ON {table}.id = {model_relation_table}.entity_id - WHERE {table}.keys REGEXP ? - {} - GROUP BY {table}.event_id - "#, - if entity_updated_after.is_some() { - format!("AND {table}.updated_at >= ?") - } else { - String::new() - } - ) - } else { - format!( - r#" - SELECT {table}.id, group_concat({model_relation_table}.model_id) as model_ids - FROM {table} - JOIN {model_relation_table} ON {table}.id = {model_relation_table}.entity_id - WHERE {table}.keys REGEXP ? - {} - GROUP BY {table}.id - "#, - if entity_updated_after.is_some() { - format!("AND {table}.updated_at >= ?") - } else { - String::new() - } - ) - }; - - if !keys_clause.models.is_empty() { - // filter by models - models_query += &format!( - "HAVING {}", - keys_clause - .models - .iter() - .map(|model| { - let (namespace, name) = model - .split_once('-') - .ok_or(QueryError::InvalidNamespacedModel(model.clone()))?; - let model_id = compute_selector_from_names(namespace, name); - Ok(format!("INSTR(model_ids, '{:#x}') > 0", model_id)) - }) - .collect::, Error>>()? - .join(" OR ") - .as_str() - ); - } + let entity_models = + entity_models.iter().map(|model| compute_selector_from_tag(model)).collect::>(); + let schemas = self + .model_cache + .models(&entity_models) + .await? 
+ .iter() + .map(|m| m.schema.clone()) + .collect::>(); - models_query += &format!(" ORDER BY {table}.event_id DESC"); + let having_clause = entity_models + .iter() + .map(|model| format!("INSTR(model_ids, '{:#x}') > 0", model)) + .collect::>() + .join(" OR "); + let (query, count_query) = build_sql_query( + &schemas, + table, + model_relation_table, + entity_relation_column, + Some(&where_clause), + if !having_clause.is_empty() { Some(&having_clause) } else { None }, + order_by, + limit, + offset, + )?; - if limit.is_some() { - models_query += " LIMIT ?"; + let mut count_query = sqlx::query_scalar(&count_query); + for value in &bind_values { + count_query = count_query.bind(value); } - if offset.is_some() { - models_query += " OFFSET ?"; + let total_count = count_query.fetch_one(&self.pool).await?; + if total_count == 0 { + return Ok((Vec::new(), 0)); } if table == EVENT_MESSAGES_HISTORICAL_TABLE { - let entities = self - .fetch_historical_event_messages(&models_query, Some(&keys_pattern), limit, offset) - .await?; + let entities = self.fetch_historical_event_messages( + &format!( + r#" + SELECT {table}.id, {table}.data, {table}.model_id, group_concat({model_relation_table}.model_id) as model_ids + FROM {table} + JOIN {model_relation_table} ON {table}.id = {model_relation_table}.entity_id + WHERE {where_clause} + GROUP BY {table}.event_id + ORDER BY {table}.event_id DESC + "# + ), + bind_values, + limit, + offset + ).await?; return Ok((entities, total_count)); } - let mut query = sqlx::query_as(&models_query).bind(&keys_pattern); - if let Some(entity_updated_after) = entity_updated_after.clone() { - query = query.bind(entity_updated_after); + let mut query = sqlx::query(&query); + for value in &bind_values { + query = query.bind(value); } - query = query.bind(limit).bind(offset); - let db_entities: Vec<(String, String)> = query.fetch_all(&self.pool).await?; + let entities = query.fetch_all(&self.pool).await?; + let entities = entities + .iter() + .map(|row| map_row_to_entity(row, &schemas, dont_include_hashed_keys)) + .collect::, Error>>()?; - let entities = self - .fetch_entities( - table, - entity_relation_column, - db_entities, - dont_include_hashed_keys, - order_by, - entity_models, - ) - .await?; Ok((entities, total_count)) } @@ -855,8 +638,10 @@ impl DojoWorld { let (entity_query, count_query) = build_sql_query( &schemas, table, + model_relation_table, entity_relation_column, Some(&where_clause), + None, order_by, limit, offset, @@ -901,76 +686,53 @@ impl DojoWorld { entity_models: Vec, entity_updated_after: Option, ) -> Result<(Vec, u32), Error> { - let (where_clause, having_clause, join_clause, bind_values) = - build_composite_clause(table, model_relation_table, &composite, entity_updated_after)?; - - let count_query = if !having_clause.is_empty() { - format!( - r#" - SELECT COUNT(*) FROM ( - SELECT DISTINCT [{table}].id - FROM [{table}] - JOIN {model_relation_table} ON [{table}].id = {model_relation_table}.entity_id - {join_clause} - {where_clause} - GROUP BY [{table}].id - {having_clause} - ) as filtered_count - "#, - ) - } else { - format!( - r#" - SELECT COUNT(DISTINCT [{table}].id) - FROM [{table}] - JOIN {model_relation_table} ON [{table}].id = {model_relation_table}.entity_id - {join_clause} - {where_clause} - "#, - ) - }; + let (where_clause, bind_values) = + build_composite_clause(table, &composite, entity_updated_after)?; + + let entity_models = + entity_models.iter().map(|model| compute_selector_from_tag(model)).collect::>(); + let schemas = + 
self.model_cache.models(&entity_models).await?.into_iter().map(|m| m.schema).collect(); + + let having_clause = entity_models + .iter() + .map(|model| format!("INSTR(model_ids, '{:#x}') > 0", model)) + .collect::>() + .join(" OR "); - let mut count_query = sqlx::query_scalar::<_, u32>(&count_query); + let (query, count_query) = build_sql_query( + &schemas, + table, + model_relation_table, + entity_relation_column, + if where_clause.is_empty() { None } else { Some(&where_clause) }, + if having_clause.is_empty() { None } else { Some(&having_clause) }, + order_by, + limit, + offset, + )?; + + let mut count_query = sqlx::query_scalar(&count_query); for value in &bind_values { count_query = count_query.bind(value); } - let total_count = count_query.fetch_optional(&self.pool).await?.unwrap_or(0); + + let total_count = count_query.fetch_one(&self.pool).await?; if total_count == 0 { return Ok((Vec::new(), 0)); } - let query = format!( - r#" - SELECT DISTINCT [{table}].id, group_concat({model_relation_table}.model_id) as model_ids - FROM [{table}] - JOIN {model_relation_table} ON [{table}].id = {model_relation_table}.entity_id - {join_clause} - {where_clause} - GROUP BY [{table}].id - {having_clause} - ORDER BY [{table}].event_id DESC - LIMIT ? OFFSET ? - "#, - ); - - let mut db_query = sqlx::query_as(&query); + println!("query: {}", query); + let mut query = sqlx::query(&query); for value in &bind_values { - db_query = db_query.bind(value); + query = query.bind(value); } - db_query = db_query.bind(limit).bind(offset); - - let db_entities: Vec<(String, String)> = db_query.fetch_all(&self.pool).await?; + let db_entities = query.fetch_all(&self.pool).await?; - let entities = self - .fetch_entities( - table, - entity_relation_column, - db_entities, - dont_include_hashed_keys, - order_by, - entity_models, - ) - .await?; + let entities = db_entities + .par_iter() + .map(|row| map_row_to_entity(row, &schemas, dont_include_hashed_keys)) + .collect::, Error>>()?; Ok((entities, total_count)) } @@ -1288,8 +1050,15 @@ fn map_row_to_entity( dont_include_hashed_keys: bool, ) -> Result { let hashed_keys = Felt::from_str(&row.get::("id")).map_err(ParseError::FromStr)?; + let model_ids = row + .get::("model_ids") + .split(',') + .map(|id| Felt::from_str(id).map_err(ParseError::FromStr)) + .collect::, _>>()?; + let models = schemas .iter() + .filter(|schema| model_ids.contains(&compute_selector_from_tag(&schema.name()))) .map(|schema| { let mut ty = schema.clone(); map_row_to_ty("", &schema.name(), &mut ty, row)?; @@ -1338,16 +1107,12 @@ fn build_keys_pattern(clause: &proto::types::KeysClause) -> Result, -) -> Result<(String, String, String, Vec), Error> { +) -> Result<(String, Vec), Error> { let is_or = composite.operator == LogicalOperator::Or as i32; let mut where_clauses = Vec::new(); - let mut join_clauses = Vec::new(); - let mut having_clauses = Vec::new(); let mut bind_values = Vec::new(); - let mut seen_models = HashMap::new(); for clause in &composite.clauses { match clause.clause_type.as_ref().unwrap() { @@ -1369,17 +1134,17 @@ fn build_composite_clause( where_clauses.push(format!("({table}.keys REGEXP ?)")); // Add model checks for specified models - for model in &keys.models { - let (namespace, model_name) = model - .split_once('-') - .ok_or(QueryError::InvalidNamespacedModel(model.clone()))?; - let model_id = compute_selector_from_names(namespace, model_name); - - having_clauses.push(format!( - "INSTR(group_concat({model_relation_table}.model_id), '{:#x}') > 0", - model_id - )); - } + + // NOTE: 
disabled since we are now using the top level entity models + + // for model in &keys.models { + // let (namespace, model_name) = model + // .split_once('-') + // .ok_or(QueryError::InvalidNamespacedModel(model.clone()))?; + // let model_id = compute_selector_from_names(namespace, model_name); + + // having_clauses.push(format!("INSTR(model_ids, '{:#x}') > 0", model_id)); + // } } ClauseType::Member(member) => { let comparison_operator = ComparisonOperator::from_repr(member.operator as usize) @@ -1414,65 +1179,27 @@ fn build_composite_clause( let value = prepare_comparison(&value, &mut bind_values)?; let model = member.model.clone(); - // Get or create unique alias for this model - let alias = seen_models.entry(model.clone()).or_insert_with(|| { - let (namespace, model_name) = model - .split_once('-') - .ok_or(QueryError::InvalidNamespacedModel(model.clone())) - .unwrap(); - let model_id = compute_selector_from_names(namespace, model_name); - - // Add model check to having clause - having_clauses.push(format!( - "INSTR(group_concat({model_relation_table}.model_id), '{:#x}') > 0", - model_id - )); - - // Add join clause - join_clauses.push(format!( - "LEFT JOIN [{model}] AS [{model}] ON [{table}].id = \ - [{model}].internal_entity_id" - )); - - model.clone() - }); // Use the column name directly since it's already flattened where_clauses - .push(format!("([{alias}].[{}] {comparison_operator} {value})", member.member)); + .push(format!("([{model}].[{}] {comparison_operator} {value})", member.member)); } ClauseType::Composite(nested) => { // Handle nested composite by recursively building the clause - let (nested_where, nested_having, nested_join, nested_values) = - build_composite_clause( - table, - model_relation_table, - nested, - entity_updated_after.clone(), - )?; + let (nested_where, nested_values) = + build_composite_clause(table, nested, entity_updated_after.clone())?; if !nested_where.is_empty() { - where_clauses.push(format!("({})", nested_where.trim_start_matches("WHERE "))); + where_clauses.push(nested_where); } - if !nested_having.is_empty() { - having_clauses - .push(format!("({})", nested_having.trim_start_matches("HAVING "))); - } - join_clauses.extend( - nested_join - .split_whitespace() - .filter(|&s| s.starts_with("LEFT")) - .map(String::from), - ); bind_values.extend(nested_values); } } } - let join_clause = join_clauses.join(" "); let where_clause = if !where_clauses.is_empty() { format!( - "WHERE {} {}", + "{} {}", where_clauses.join(if is_or { " OR " } else { " AND " }), if let Some(entity_updated_after) = entity_updated_after.clone() { bind_values.push(entity_updated_after); @@ -1483,18 +1210,12 @@ fn build_composite_clause( ) } else if let Some(entity_updated_after) = entity_updated_after.clone() { bind_values.push(entity_updated_after); - format!("WHERE {table}.updated_at >= ?") - } else { - String::new() - }; - - let having_clause = if !having_clauses.is_empty() { - format!("HAVING {}", having_clauses.join(if is_or { " OR " } else { " AND " })) + format!("{table}.updated_at >= ?") } else { String::new() }; - Ok((where_clause, having_clause, join_clause, bind_values)) + Ok((where_clause, bind_values)) } type ServiceResult = Result, Status>; diff --git a/crates/torii/grpc/src/server/tests/entities_test.rs b/crates/torii/grpc/src/server/tests/entities_test.rs index 22a38fda91..9791c09455 100644 --- a/crates/torii/grpc/src/server/tests/entities_test.rs +++ b/crates/torii/grpc/src/server/tests/entities_test.rs @@ -142,7 +142,7 @@ async fn test_entities_queries(sequencer: 
&RunnerCtx) { None, false, None, - vec![], + vec!["ns-Moves".to_string(), "ns-Position".to_string()], None, ) .await diff --git a/examples/spawn-and-move/Scarb.lock b/examples/spawn-and-move/Scarb.lock index 68efea17cb..76b825ff2e 100644 --- a/examples/spawn-and-move/Scarb.lock +++ b/examples/spawn-and-move/Scarb.lock @@ -31,7 +31,7 @@ dependencies = [ [[package]] name = "dojo_examples" -version = "1.0.5" +version = "1.0.8" dependencies = [ "armory", "bestiary",
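For orientation, the two statements returned by the updated build_sql_query now take roughly the shape sketched below for a single model. This is an illustration, not output copied from the patch: the entities, entity_model and [Test-Position] names are the ones used in the test above, the keys REGEXP filter is the WHERE clause used by the keys-clause path, the 0x1234 selector in the HAVING filter is a placeholder, and LIMIT/OFFSET handling falls outside the hunks shown here.

    // Sketch of the generated SQL for one model; names and the selector are illustrative.
    const ENTITY_QUERY: &str = r#"
        SELECT entities.id, entities.keys,
               group_concat(entity_model.model_id) as model_ids,
               [Test-Position].[vec.x] as "Test-Position.vec.x"
        FROM [entities]
        LEFT JOIN [Test-Position] ON entities.id = [Test-Position].internal_entity_id
        JOIN entity_model ON entities.id = entity_model.entity_id
        WHERE entities.keys REGEXP ?
        GROUP BY entities.id
        HAVING INSTR(model_ids, '0x1234') > 0
        ORDER BY entities.event_id DESC
    "#;

    // The count query wraps the same joins and filters in a subquery so that the
    // HAVING filter still runs against the per-entity model_ids aggregate.
    const COUNT_QUERY: &str = r#"
        SELECT COUNT(*) FROM (
            SELECT entities.id, group_concat(entity_model.model_id) as model_ids
            FROM [entities]
            LEFT JOIN [Test-Position] ON entities.id = [Test-Position].internal_entity_id
            JOIN entity_model ON entities.id = entity_model.entity_id
            WHERE entities.keys REGEXP ?
            GROUP BY entities.id
            HAVING INSTR(model_ids, '0x1234') > 0
        ) AS filtered_entities
    "#;

Both statements are built from the same joins and filters, which is what allows the handlers to bind a single bind_values list to each. When no entity_models filter is supplied, the new empty-selectors fast path in ModelCache::models returns every cached schema and the HAVING clause is omitted entirely.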
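The refactored hashed-keys, keys-clause and composite paths share one execution pattern: collect every dynamic value into a bind_values vector, bind it to the count query, short-circuit when the count is zero, then bind the same values to the data query and map the rows. A condensed sketch of that flow under assumed names (fetch_with_count is not a function in the patch), using sqlx against the SQLite pool; limit and offset are handled inside build_sql_query in this refactor rather than bound here:

    use sqlx::sqlite::{SqlitePool, SqliteRow};

    // Sketch only: run the count query first, then the data query, binding the
    // same dynamic values (keys pattern, updated_at cutoff, member values, ...)
    // to both.
    async fn fetch_with_count<'a>(
        pool: &SqlitePool,
        count_sql: &'a str,
        data_sql: &'a str,
        bind_values: &'a [String],
    ) -> Result<(u32, Vec<SqliteRow>), sqlx::Error> {
        let mut count_query = sqlx::query_scalar::<_, u32>(count_sql);
        for value in bind_values {
            count_query = count_query.bind(value);
        }
        let total_count = count_query.fetch_one(pool).await?;
        if total_count == 0 {
            return Ok((0, Vec::new()));
        }

        let mut query = sqlx::query(data_sql);
        for value in bind_values {
            query = query.bind(value);
        }
        let rows = query.fetch_all(pool).await?;
        Ok((total_count, rows))
    }

Binding by iteration replaces the old per-branch bind chains, so adding another optional filter only means pushing one more value onto bind_values.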
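On the decoding side, map_row_to_entity now reads the aggregated model_ids column and only decodes schemas whose selector actually appears for that row, which is what made the per-group fetch_entities helper removable. A minimal sketch of just that filtering step; the Felt import path is an assumption, and compute_selector_from_tag is assumed to be in scope as the same dojo naming helper the patch calls elsewhere:

    use std::str::FromStr;

    use anyhow::Result;
    use dojo_types::schema::Ty;
    use sqlx::sqlite::SqliteRow;
    use sqlx::Row;
    use starknet_crypto::Felt; // import path assumed

    // compute_selector_from_tag(tag: &str) -> Felt is assumed to be in scope.
    fn models_for_row(row: &SqliteRow, schemas: &[Ty]) -> Result<Vec<Ty>> {
        // group_concat() produced a comma-separated list of model selectors.
        let model_ids = row
            .get::<String, &str>("model_ids")
            .split(',')
            .map(|id| {
                Felt::from_str(id).map_err(|e| anyhow::anyhow!("invalid model selector: {e:?}"))
            })
            .collect::<Result<Vec<_>, _>>()?;

        // Keep only the schemas whose selector is present for this entity row.
        Ok(schemas
            .iter()
            .filter(|schema| model_ids.contains(&compute_selector_from_tag(&schema.name())))
            .cloned()
            .collect())
    }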