diff --git a/.env b/.env index dd769d78..94c5549d 100644 --- a/.env +++ b/.env @@ -8,3 +8,4 @@ RABBITMQ_DEFAULT_USER=admin RABBITMQ_DEFAULT_PASS=adminpasswd CLICKHOUSE_USER=default +POSTGRES_PORT=5433 \ No newline at end of file diff --git a/.gitignore b/.gitignore index 2c808d87..fd6aad22 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ **/.env.local **/.env .DS_Store -**/..DS_Store \ No newline at end of file +**/..DS_Store +.idea \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 7e46ae9f..9b2b756f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -23,14 +23,14 @@ We do this to avoid legal issues and disputes, and to stay compliant with releva Don't get overwhelmed by the number of docker-compose files. Here's a quick overview: -- `docker-compose.yml` is the simplest one that spins up frontend, app-server, and postgres. Good for quickstarts. +- `docker-compose.yml` is the simplest one that spins up frontend, app-server, clickhouse, and postgres. Good for quickstarts. - `docker-compose-full.yml` is the one you want to use for running the full stack locally. This is the best for self-hosting. - `docker-compose-local-dev-full.yml` full file for local development. To be used when you make changes to the backend. It will only run the dependency services (postgres, qdrant, clickhouse, rabbitmq). You will need to run `cargo r`, `pnpm run dev`, and `python server.py` manually. - `docker-compose-local-dev.yml` is the one you want to use for local development. It will only - run postgres and app-server. Good for frontend changes. + run postgres, clickhouse, and app-server. Good for frontend changes. - `docker-compose-local-build.yml` will build the services from the source and run them in production mode. This is good for self-hosting with your own changes, or for testing the changes after developing on your own and before opening a PR. 
@@ -38,7 +38,7 @@ or for testing the changes after developing on your own and before opening a PR. |---------|-------------------|------------------------|------------------------------|----------------------------|------------------------------| | postgres | ✅ | ✅ | ✅ | ✅ | ✅ | | qdrant | ❌ | ✅ | ✅ | ❌ | ✅ | -| clickhouse | ❌ | ✅ | ✅ | ❌ | ✅ | +| clickhouse | ✅ | ✅ | ✅ | ✅ | ✅ | | rabbitmq | ❌ | ✅ | ✅ | ❌ | ✅ | | app-server | ℹ️ | ✅ | 💻 | ℹ️ | 🔧 | | frontend | ℹ️ | ✅ | 💻 | 💻 | 🔧 | diff --git a/README.md b/README.md index 7660b660..550e00b1 100644 --- a/README.md +++ b/README.md @@ -48,8 +48,8 @@ cd lmnr docker compose up -d ``` -This will spin up a lightweight version of the stack with Postgres, app-server, and frontend. This is good for a quickstart -or for lightweight usage. You can access the UI at http://localhost:3000 in your browser. +This will spin up a lightweight version of the stack with Postgres, clickhouse, app-server, and frontend. This is good for a quickstart +or for lightweight usage. You can access the UI at http://localhost:5667 in your browser. For production environment, we recommend using our [managed platform](https://www.lmnr.ai/projects) or `docker compose -f docker-compose-full.yml up -d`. 
diff --git a/app-server/.env.example b/app-server/.env.example index c841260c..ab10240a 100644 --- a/app-server/.env.example +++ b/app-server/.env.example @@ -1,6 +1,6 @@ SEMANTIC_SEARCH_URL=http://localhost:8080 # postgres://user:password@host:port/dbname -DATABASE_URL="postgres://postgres:postgres_passwordabc@localhost:5432/postgres" +DATABASE_URL="postgres://postgres:postgres_passwordabc@localhost:5433/postgres" PORT=8000 GRPC_PORT=8001 diff --git a/app-server/src/api/v1/traces.rs b/app-server/src/api/v1/traces.rs index 2b38ae0f..89c3e2f8 100644 --- a/app-server/src/api/v1/traces.rs +++ b/app-server/src/api/v1/traces.rs @@ -29,10 +29,12 @@ pub async fn process_traces( project_api_key: ProjectApiKey, rabbitmq_connection: web::Data>>, db: web::Data, + clickhouse: web::Data, cache: web::Data, ) -> ResponseResult { let db = db.into_inner(); let cache = cache.into_inner(); + let clickhouse = clickhouse.into_inner().as_ref().clone(); let request = ExportTraceServiceRequest::decode(body).map_err(|e| { anyhow::anyhow!("Failed to decode ExportTraceServiceRequest from bytes. 
{e}") })?; @@ -57,6 +59,7 @@ pub async fn process_traces( project_api_key.project_id, rabbitmq_connection, db, + clickhouse, cache, ) .await?; diff --git a/app-server/src/ch/evaluation_scores.rs b/app-server/src/ch/evaluation_scores.rs index 469397a1..55a72741 100644 --- a/app-server/src/ch/evaluation_scores.rs +++ b/app-server/src/ch/evaluation_scores.rs @@ -4,12 +4,8 @@ use clickhouse::Row; use serde::{Deserialize, Serialize, Serializer}; use uuid::Uuid; -use crate::{ - evaluations::utils::EvaluationDatapointResult, - features::{is_feature_enabled, Feature}, -}; - use super::utils::chrono_to_nanoseconds; +use crate::evaluations::utils::EvaluationDatapointResult; fn serialize_timestamp(timestamp: &DateTime, serializer: S) -> Result where @@ -78,10 +74,6 @@ pub async fn insert_evaluation_scores( return Ok(()); } - if !is_feature_enabled(Feature::FullBuild) { - return Ok(()); - } - let ch_insert = clickhouse.insert("evaluation_scores"); match ch_insert { Ok(mut ch_insert) => { @@ -284,9 +276,6 @@ pub async fn delete_evaluation_score( result_id: Uuid, label_id: Uuid, ) -> Result<()> { - if !is_feature_enabled(Feature::FullBuild) { - return Ok(()); - } // Note, this does not immediately physically delete the data. 
// https://clickhouse.com/docs/en/sql-reference/statements/delete clickhouse diff --git a/app-server/src/ch/events.rs b/app-server/src/ch/events.rs index 4bde840a..c2707840 100644 --- a/app-server/src/ch/events.rs +++ b/app-server/src/ch/events.rs @@ -3,7 +3,7 @@ use clickhouse::Row; use serde::Serialize; use uuid::Uuid; -use crate::{db::events::Event, features::is_feature_enabled, Feature}; +use crate::db::events::Event; use super::utils::chrono_to_nanoseconds; @@ -30,9 +30,6 @@ impl CHEvent { } pub async fn insert_events(clickhouse: clickhouse::Client, events: Vec) -> Result<()> { - if !is_feature_enabled(Feature::FullBuild) { - return Ok(()); - } if events.is_empty() { return Ok(()); } diff --git a/app-server/src/ch/labels.rs b/app-server/src/ch/labels.rs index b63447b3..c792a361 100644 --- a/app-server/src/ch/labels.rs +++ b/app-server/src/ch/labels.rs @@ -4,10 +4,7 @@ use clickhouse::Row; use serde::{Deserialize, Serialize}; use uuid::Uuid; -use crate::{ - db::labels::LabelSource, - features::{is_feature_enabled, Feature}, -}; +use crate::db::labels::LabelSource; use super::utils::chrono_to_nanoseconds; @@ -76,10 +73,6 @@ pub async fn insert_label( value: f64, span_id: Uuid, ) -> Result<()> { - if !is_feature_enabled(Feature::FullBuild) { - return Ok(()); - } - let label = CHLabel::new( project_id, class_id, @@ -120,9 +113,6 @@ pub async fn delete_label( span_id: Uuid, id: Uuid, ) -> Result<()> { - if !is_feature_enabled(Feature::FullBuild) { - return Ok(()); - } // Note, this does not immediately physically delete the data. 
// https://clickhouse.com/docs/en/sql-reference/statements/delete client diff --git a/app-server/src/ch/spans.rs b/app-server/src/ch/spans.rs index 99b083d6..c0e5134b 100644 --- a/app-server/src/ch/spans.rs +++ b/app-server/src/ch/spans.rs @@ -6,7 +6,6 @@ use uuid::Uuid; use crate::{ db::spans::{Span, SpanType}, - features::{is_feature_enabled, Feature}, traces::spans::SpanUsage, }; @@ -97,9 +96,6 @@ impl CHSpan { } pub async fn insert_span(clickhouse: clickhouse::Client, span: &CHSpan) -> Result<()> { - if !is_feature_enabled(Feature::FullBuild) { - return Ok(()); - } let ch_insert = clickhouse.insert("spans"); match ch_insert { Ok(mut ch_insert) => { diff --git a/app-server/src/ch/utils.rs b/app-server/src/ch/utils.rs index c3b2be52..2503c2d3 100644 --- a/app-server/src/ch/utils.rs +++ b/app-server/src/ch/utils.rs @@ -4,10 +4,7 @@ use clickhouse::Row; use serde::Deserialize; use uuid::Uuid; -use crate::{ - db::utils::validate_sql_string, - features::{is_feature_enabled, Feature}, -}; +use crate::db::utils::validate_sql_string; use super::modifiers::GroupByInterval; @@ -101,13 +98,6 @@ async fn get_time_bounds( return Err(anyhow::anyhow!("Invalid column name: {}", column_name)); } - if !is_feature_enabled(Feature::FullBuild) { - return Ok(TimeBounds { - min_time: chrono_to_nanoseconds(Utc::now() - chrono::Duration::days(1)), - max_time: chrono_to_nanoseconds(Utc::now()), - }); - } - let query_string = format!( "SELECT MIN({column_name}) AS min_time, diff --git a/app-server/src/main.rs b/app-server/src/main.rs index 077e990f..83c6d3e5 100644 --- a/app-server/src/main.rs +++ b/app-server/src/main.rs @@ -168,28 +168,22 @@ fn main() -> anyhow::Result<()> { let interrupt_senders = Arc::new(DashMap::>::new()); - let clickhouse = if is_feature_enabled(Feature::FullBuild) { - let clickhouse_url = env::var("CLICKHOUSE_URL").expect("CLICKHOUSE_URL must be set"); - let clickhouse_user = env::var("CLICKHOUSE_USER").expect("CLICKHOUSE_USER must be set"); - let 
clickhouse_password = env::var("CLICKHOUSE_PASSWORD"); - // https://clickhouse.com/docs/en/cloud/bestpractices/asynchronous-inserts -> Create client which will wait for async inserts - // For now, we're not waiting for inserts to finish, but later need to add queue and batch on client-side - let mut client = clickhouse::Client::default() - .with_url(clickhouse_url) - .with_user(clickhouse_user) - .with_database("default") - .with_option("async_insert", "1") - .with_option("wait_for_async_insert", "0"); - if let Ok(clickhouse_password) = clickhouse_password { - client = client.with_password(clickhouse_password); - } else { + let clickhouse_url = env::var("CLICKHOUSE_URL").expect("CLICKHOUSE_URL must be set"); + let clickhouse_user = env::var("CLICKHOUSE_USER").expect("CLICKHOUSE_USER must be set"); + let clickhouse_password = env::var("CLICKHOUSE_PASSWORD"); + let client = clickhouse::Client::default() + .with_url(clickhouse_url) + .with_user(clickhouse_user) + .with_database("default") + .with_option("async_insert", "1") + .with_option("wait_for_async_insert", "0"); + + let clickhouse = match clickhouse_password { + Ok(password) => client.with_password(password), + _ => { log::warn!("CLICKHOUSE_PASSWORD not set, using without password"); + client } - client - } else { - // This client does not connect to ClickHouse, and the feature flag must be checked before using it - // TODO: wrap this in a dyn trait object - clickhouse::Client::default() }; let mut rabbitmq_connection = None; @@ -253,6 +247,7 @@ fn main() -> anyhow::Result<()> { let runtime_handle_for_http = runtime_handle.clone(); let db_for_http = db.clone(); let cache_for_http = cache.clone(); + let clickhouse_for_grpc = clickhouse.clone(); let http_server_handle = thread::Builder::new() .name("http".to_string()) .spawn(move || { @@ -555,6 +550,7 @@ fn main() -> anyhow::Result<()> { db.clone(), cache.clone(), rabbitmq_connection_grpc.clone(), + clickhouse_for_grpc, ); Server::builder() diff --git 
a/app-server/src/routes/traces.rs b/app-server/src/routes/traces.rs index 13a3e430..90827638 100644 --- a/app-server/src/routes/traces.rs +++ b/app-server/src/routes/traces.rs @@ -3,8 +3,6 @@ use std::sync::Arc; use super::{GetMetricsQueryParams, ResponseResult}; use crate::ch::utils::get_bounds; -use crate::ch::MetricTimeValue; -use crate::features::{is_feature_enabled, Feature}; use crate::semantic_search::semantic_search_grpc::DateRanges; use crate::semantic_search::SemanticSearch; use crate::{ @@ -122,10 +120,6 @@ pub async fn get_traces_metrics( past_hours: "all".to_string(), })); - if !is_feature_enabled(Feature::FullBuild) { - return Ok(HttpResponse::Ok().json(Vec::>::new())); - } - match defaulted_range { DateRange::Relative(interval) => { if interval.past_hours == "all" { diff --git a/app-server/src/traces/consumer.rs b/app-server/src/traces/consumer.rs index 58864029..e686ebce 100644 --- a/app-server/src/traces/consumer.rs +++ b/app-server/src/traces/consumer.rs @@ -57,9 +57,6 @@ async fn inner_process_queue_spans( clickhouse: clickhouse::Client, storage: Arc, ) { - if !is_feature_enabled(Feature::FullBuild) { - return; - } - // Safe to unwrap because we checked is_feature_enabled above + // NOTE(review): unwrap assumes rabbitmq_connection is Some; the FullBuild check that guaranteed this was removed above — confirm callers only spawn this consumer when RabbitMQ is configured  let channel = rabbitmq_connection.unwrap().create_channel().await.unwrap(); diff --git a/app-server/src/traces/grpc_service.rs b/app-server/src/traces/grpc_service.rs index d219dffb..bd24c729 100644 --- a/app-server/src/traces/grpc_service.rs +++ b/app-server/src/traces/grpc_service.rs @@ -20,6 +20,7 @@ pub struct ProcessTracesService { db: Arc, cache: Arc, rabbitmq_connection: Option>, + clickhouse: clickhouse::Client, } impl ProcessTracesService { @@ -27,11 +28,13 @@ db: Arc, cache: Arc, rabbitmq_connection: Option>, + clickhouse: clickhouse::Client, ) -> Self { Self { db, cache, rabbitmq_connection, + clickhouse, } } } @@ -71,6 +74,7 @@ impl TraceService for ProcessTracesService { project_id, self.rabbitmq_connection.clone(), self.db.clone(), + 
self.clickhouse.clone(), self.cache.clone(), ) .await diff --git a/app-server/src/traces/producer.rs b/app-server/src/traces/producer.rs index 0f5d5b55..2cfde157 100644 --- a/app-server/src/traces/producer.rs +++ b/app-server/src/traces/producer.rs @@ -10,6 +10,7 @@ use uuid::Uuid; use crate::{ api::v1::traces::RabbitMqSpanMessage, cache::Cache, + ch::{self, spans::CHSpan}, db::{events::Event, spans::Span, DB}, features::{is_feature_enabled, Feature}, opentelemetry::opentelemetry::proto::collector::trace::v1::{ @@ -25,6 +26,7 @@ pub async fn push_spans_to_queue( project_id: Uuid, rabbitmq_connection: Option>, db: Arc, + clickhouse: clickhouse::Client, cache: Arc, ) -> Result { if !is_feature_enabled(Feature::FullBuild) { @@ -54,6 +56,20 @@ pub async fn push_spans_to_queue( e ); } + + let ch_span = CHSpan::from_db_span(&span, span_usage, project_id); + + let insert_span_res = + ch::spans::insert_span(clickhouse.clone(), &ch_span).await; + + if let Err(e) = insert_span_res { + log::error!( + "Failed to insert span into Clickhouse. 
span_id [{}], project_id [{}]: {:?}", + span.span_id, + project_id, + e + ); + } } } } diff --git a/docker-compose-full.yml b/docker-compose-full.yml index 44cf2c5c..839d30fc 100644 --- a/docker-compose-full.yml +++ b/docker-compose-full.yml @@ -7,18 +7,13 @@ name: lmnr services: qdrant: image: qdrant/qdrant - ports: - - "6333:6333" - - "6334:6334" volumes: - type: volume source: qdrant-data target: /data - + rabbitmq: image: rabbitmq - ports: - - "5672:5672" environment: RABBITMQ_DEFAULT_USER: ${RABBITMQ_DEFAULT_USER} RABBITMQ_DEFAULT_PASS: ${RABBITMQ_DEFAULT_PASS} @@ -32,8 +27,6 @@ services: build: context: ./clickhouse container_name: clickhouse - ports: - - "8123:8123" volumes: - type: volume source: clickhouse-data @@ -52,8 +45,6 @@ services: semantic-search-service: image: ghcr.io/lmnr-ai/semantic-search-service - ports: - - "8080:8080" depends_on: - qdrant environment: @@ -65,14 +56,10 @@ services: python-executor: image: ghcr.io/lmnr-ai/python-executor - ports: - - "8811:8811" pull_policy: always postgres: image: postgres:16 - ports: - - "5432:5432" volumes: - postgres-data:/var/lib/postgresql/data environment: @@ -80,16 +67,13 @@ services: POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} POSTGRES_DB: ${POSTGRES_DB} healthcheck: - test: ["CMD", "pg_isready", "-U", "${POSTGRES_USER}", "-d", "${POSTGRES_DB}"] + test: [ "CMD", "pg_isready", "-U", "${POSTGRES_USER}", "-d", "${POSTGRES_DB}" ] interval: 2s timeout: 5s retries: 5 app-server: image: ghcr.io/lmnr-ai/app-server - ports: - - "8000:8000" - - "8001:8001" depends_on: semantic-search-service: condition: service_started @@ -114,7 +98,7 @@ services: frontend: image: ghcr.io/lmnr-ai/frontend ports: - - "3000:3000" + - "5667:5667" pull_policy: always depends_on: postgres: @@ -122,13 +106,13 @@ services: clickhouse: condition: service_started environment: - - PORT=3000 + - PORT=5667 - BACKEND_URL=http://app-server:8000 - SHARED_SECRET_TOKEN=${SHARED_SECRET_TOKEN} - 
DATABASE_URL=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@postgres:5432/${POSTGRES_DB} - - NEXTAUTH_URL=http://localhost:3000 + - NEXTAUTH_URL=http://localhost:5667 - NEXTAUTH_SECRET=some_secret - - NEXT_PUBLIC_URL=http://localhost:3000 + - NEXT_PUBLIC_URL=http://localhost:5667 - ENVIRONMENT=FULL - CLICKHOUSE_URL=http://clickhouse:8123 - CLICKHOUSE_USER=${CLICKHOUSE_USER} diff --git a/docker-compose-local-build.yml b/docker-compose-local-build.yml index 18b320a6..9d5e3fb8 100644 --- a/docker-compose-local-build.yml +++ b/docker-compose-local-build.yml @@ -5,9 +5,6 @@ name: lmnr services: qdrant: image: qdrant/qdrant - ports: - - "6333:6333" - - "6334:6334" volumes: - type: volume source: qdrant-data @@ -15,8 +12,6 @@ services: rabbitmq: image: rabbitmq - ports: - - "5672:5672" environment: RABBITMQ_DEFAULT_USER: ${RABBITMQ_DEFAULT_USER} RABBITMQ_DEFAULT_PASS: ${RABBITMQ_DEFAULT_PASS} @@ -30,8 +25,6 @@ services: build: context: ./clickhouse container_name: clickhouse - ports: - - "8123:8123" volumes: - type: volume source: clickhouse-data @@ -52,8 +45,6 @@ services: build: context: ./semantic-search-service container_name: semantic-search-service - ports: - - "8080:8080" depends_on: - qdrant environment: @@ -64,8 +55,6 @@ services: postgres: image: postgres:16 - ports: - - "5432:5432" volumes: - postgres-data:/var/lib/postgresql/data environment: @@ -82,8 +71,6 @@ services: build: context: ./python-executor container_name: python-executor - ports: - - "8811:8811" app-server: build: @@ -91,9 +78,6 @@ services: args: DATABASE_URL: postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@postgres:5432/${POSTGRES_DB} container_name: app-server - ports: - - "8000:8000" - - "8001:8001" depends_on: semantic-search-service: condition: service_started diff --git a/docker-compose-local-dev-full.yml b/docker-compose-local-dev-full.yml index 07371f11..69aa8774 100644 --- a/docker-compose-local-dev-full.yml +++ b/docker-compose-local-dev-full.yml @@ -13,7 +13,7 @@ services: - type: 
volume source: qdrant-data target: /data - + rabbitmq: image: rabbitmq ports: @@ -53,7 +53,7 @@ services: postgres: image: postgres:16 ports: - - "5432:5432" + - "5433:5432" volumes: - postgres-data:/var/lib/postgresql/data environment: @@ -61,7 +61,7 @@ services: POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} POSTGRES_DB: ${POSTGRES_DB} healthcheck: - test: ["CMD", "pg_isready", "-U", "${POSTGRES_USER}", "-d", "${POSTGRES_DB}"] + test: [ "CMD", "pg_isready", "-U", "${POSTGRES_USER}", "-d", "${POSTGRES_DB}" ] interval: 2s timeout: 5s retries: 5 diff --git a/docker-compose-local-dev.yml b/docker-compose-local-dev.yml index 0462cf72..03b127b1 100644 --- a/docker-compose-local-dev.yml +++ b/docker-compose-local-dev.yml @@ -11,7 +11,7 @@ services: postgres: image: postgres:16 ports: - - "5432:5432" + - "5433:5432" volumes: - postgres-data:/var/lib/postgresql/data environment: @@ -19,11 +19,33 @@ services: POSTGRES_PASSWORD: ${POSTGRES_PASSWORD} POSTGRES_DB: ${POSTGRES_DB} healthcheck: - test: ["CMD", "pg_isready", "-U", "${POSTGRES_USER}", "-d", "${POSTGRES_DB}"] + test: [ "CMD", "pg_isready", "-U", "${POSTGRES_USER}", "-d", "${POSTGRES_DB}" ] interval: 2s timeout: 5s retries: 5 + clickhouse: + build: + context: ./clickhouse + container_name: clickhouse + ports: + - "8123:8123" + volumes: + - type: volume + source: clickhouse-data + target: /var/lib/clickhouse/ + - type: volume + source: clickhouse-logs + target: /var/log/clickhouse-server/ + cap_add: + - SYS_NICE + - NET_ADMIN + - IPC_LOCK + ulimits: + nofile: + soft: 262144 + hard: 262144 + app-server: image: ghcr.io/lmnr-ai/app-server pull_policy: always @@ -33,12 +55,19 @@ services: depends_on: postgres: condition: service_healthy + clickhouse: + condition: service_started + environment: PORT: 8000 GRPC_PORT: 8001 DATABASE_URL: postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@postgres:5432/${POSTGRES_DB} SHARED_SECRET_TOKEN: ${SHARED_SECRET_TOKEN} - ENVIRONMENT: LITE # this disables runtime dependency on clickhouse, 
rabbitmq, semantic search, and python executor + CLICKHOUSE_URL: http://clickhouse:8123 + CLICKHOUSE_USER: ${CLICKHOUSE_USER} + ENVIRONMENT: LITE # this disables runtime dependency on rabbitmq, semantic search, and python executor volumes: postgres-data: + clickhouse-data: + clickhouse-logs: diff --git a/docker-compose.yml b/docker-compose.yml index ae03fd30..5c2312d6 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -9,8 +9,6 @@ name: lmnr services: postgres: image: postgres:16 - ports: - - "5432:5432" volumes: - postgres-data:/var/lib/postgresql/data environment: @@ -23,39 +21,62 @@ services: timeout: 5s retries: 5 + clickhouse: + build: + context: ./clickhouse + container_name: clickhouse + volumes: + - type: volume + source: clickhouse-data + target: /var/lib/clickhouse/ + - type: volume + source: clickhouse-logs + target: /var/log/clickhouse-server/ + cap_add: + - SYS_NICE + - NET_ADMIN + - IPC_LOCK + ulimits: + nofile: + soft: 262144 + hard: 262144 + frontend: image: ghcr.io/lmnr-ai/frontend pull_policy: always ports: - - "3000:3000" + - "5667:5667" depends_on: postgres: condition: service_healthy environment: DATABASE_URL: postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@postgres:5432/${POSTGRES_DB} - PORT: 3000 + PORT: 5667 BACKEND_URL: http://app-server:8000 SHARED_SECRET_TOKEN: ${SHARED_SECRET_TOKEN} - NEXTAUTH_URL: http://localhost:3000 + NEXTAUTH_URL: http://localhost:5667 NEXTAUTH_SECRET: some_secret - NEXT_PUBLIC_URL: http://localhost:3000 + NEXT_PUBLIC_URL: http://localhost:5667 ENVIRONMENT: LITE # this disables runtime dependency on clickhouse app-server: image: ghcr.io/lmnr-ai/app-server pull_policy: always - ports: - - "8000:8000" - - "8001:8001" depends_on: postgres: condition: service_healthy + clickhouse: + condition: service_started environment: PORT: 8000 GRPC_PORT: 8001 DATABASE_URL: postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@postgres:5432/${POSTGRES_DB} SHARED_SECRET_TOKEN: ${SHARED_SECRET_TOKEN} - ENVIRONMENT: LITE # this 
disables runtime dependency on clickhouse, rabbitmq, semantic search, and python executor + CLICKHOUSE_URL: http://clickhouse:8123 + CLICKHOUSE_USER: ${CLICKHOUSE_USER} + ENVIRONMENT: LITE # this disables runtime dependency on rabbitmq, semantic search, and python executor volumes: postgres-data: + clickhouse-data: + clickhouse-logs: diff --git a/frontend/.env.local.example b/frontend/.env.local.example index f5821fe4..9cdab29a 100644 --- a/frontend/.env.local.example +++ b/frontend/.env.local.example @@ -4,16 +4,16 @@ BACKEND_URL=http://localhost:8000 SEMANTIC_SEARCH_URL=http://localhost:8080 NEXT_OTEL_FETCH_DISABLED=1 SHARED_SECRET_TOKEN=some_secret -ENVIRONMENT=LITE # these must match what you have in your docker-compose-local-dev.yml for postgres -# postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@{host}:5432/${POSTGRES_DB} -DATABASE_URL="postgres://postgres:postgres_passwordabc@localhost:5432/postgres" +# postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@{host}:${POSTGRES_PORT}/${POSTGRES_DB} +DATABASE_URL="postgres://postgres:postgres_passwordabc@localhost:5433/postgres" # replace with FULL if you are testing with a full stack # ENVIRONMENT=FULL -ENVIRONMENT=LITE # this disables runtime dependency on clickhouse - +ENVIRONMENT=LITE +CLICKHOUSE_USER=default +CLICKHOUSE_URL=http://localhost:8123 # for realtime SUPABASE_JWT_SECRET= # for auth diff --git a/frontend/.gitignore b/frontend/.gitignore index c87c9b39..d8fdd897 100644 --- a/frontend/.gitignore +++ b/frontend/.gitignore @@ -34,3 +34,5 @@ yarn-error.log* # typescript *.tsbuildinfo next-env.d.ts + +.idea \ No newline at end of file diff --git a/frontend/app/api/projects/[projectId]/spans/route.ts b/frontend/app/api/projects/[projectId]/spans/route.ts index 6f7295c9..0933d5cc 100644 --- a/frontend/app/api/projects/[projectId]/spans/route.ts +++ b/frontend/app/api/projects/[projectId]/spans/route.ts @@ -7,7 +7,7 @@ import { db } from '@/lib/db/drizzle'; import { labelClasses, labels, spans, traces } from 
'@/lib/db/migrations/schema'; import { FilterDef, filtersToSql } from '@/lib/db/modifiers'; import { getDateRangeFilters, paginatedGet } from '@/lib/db/utils'; -import { Span, TraceSearchResponse } from '@/lib/traces/types'; +import {Span, TraceSearchResponse} from '@/lib/traces/types'; import { fetcher } from '@/lib/utils'; export async function GET(req: NextRequest, props: { params: Promise<{ projectId: string }> }): Promise { @@ -101,6 +101,8 @@ export async function GET(req: NextRequest, props: { params: Promise<{ projectId const uppercased = filter.value.toUpperCase().trim(); filter.value = (uppercased === 'SPAN') ? "'DEFAULT'" : `'${uppercased}'`; filter.castType = "span_type"; + } else if (filter.column === 'model') { + filter.column = "COALESCE(attributes ->> 'gen_ai.response.model', attributes ->> 'gen_ai.request.model')"; } return filter; }); @@ -141,6 +143,7 @@ export async function GET(req: NextRequest, props: { params: Promise<{ projectId ...columns, latency: sql`EXTRACT(EPOCH FROM (end_time - start_time))`.as("latency"), path: sql`attributes ->> 'lmnr.span.path'`.as("path"), + model: sql`COALESCE(attributes ->> 'gen_ai.response.model', attributes ->> 'gen_ai.request.model')`.as('model') } }); diff --git a/frontend/app/project/[projectId]/layout.tsx b/frontend/app/project/[projectId]/layout.tsx index 46c82e47..debeb9d3 100644 --- a/frontend/app/project/[projectId]/layout.tsx +++ b/frontend/app/project/[projectId]/layout.tsx @@ -17,9 +17,9 @@ import { GetProjectResponse } from '@/lib/workspaces/types'; export default async function ProjectIdLayout( props: { - children: React.ReactNode; - params: Promise<{ projectId: string }>; - } + children: React.ReactNode; + params: Promise<{ projectId: string }>; + } ) { const params = await props.params; @@ -45,9 +45,9 @@ export default async function ProjectIdLayout( const project = projectResponse as GetProjectResponse; const showBanner = - isFeatureEnabled(Feature.WORKSPACE) && - project.isFreeTier && - 
project.spansThisMonth >= 0.8 * project.spansLimit; + isFeatureEnabled(Feature.WORKSPACE) && + project.isFreeTier && + project.spansThisMonth >= 0.8 * project.spansLimit; const posthog = PostHogClient(); posthog.identify({ @@ -71,7 +71,6 @@ export default async function ProjectIdLayout(
diff --git a/frontend/components/project/project-navbar.tsx b/frontend/components/project/project-navbar.tsx index c52c7b71..e9b18e34 100644 --- a/frontend/components/project/project-navbar.tsx +++ b/frontend/components/project/project-navbar.tsx @@ -26,10 +26,9 @@ import AvatarMenu from '../user/avatar-menu'; interface ProjectNavBarProps { projectId: string; - fullBuild: boolean; } -export default function ProjectNavbar({ projectId, fullBuild }: ProjectNavBarProps) { +export default function ProjectNavbar({ projectId }: ProjectNavBarProps) { const pathname = usePathname(); const { open, openMobile } = useSidebar(); const [showStarCard, setShowStarCard] = useState(false); @@ -97,13 +96,6 @@ export default function ProjectNavbar({ projectId, fullBuild }: ProjectNavBarPro } ]; - const navbarOptions = allOptions.filter(option => { - if (!fullBuild) { - return !['dashboard'].includes(option.name); - } - return true; - }); - return ( @@ -118,7 +110,7 @@ export default function ProjectNavbar({ projectId, fullBuild }: ProjectNavBarPro - {navbarOptions.map((option, i) => ( + {allOptions.map((option, i) => (

- Laminar is fully open source + Laminar is fully open source

- ⭐ Star it on GitHub + ⭐ Star it on GitHub
)} diff --git a/frontend/components/traces/spans-table.tsx b/frontend/components/traces/spans-table.tsx index 5dbefe03..ccd7c5bb 100644 --- a/frontend/components/traces/spans-table.tsx +++ b/frontend/components/traces/spans-table.tsx @@ -346,6 +346,11 @@ export default function SpansTable({ onRowClick }: SpansTableProps) { , size: 100 + }, + { + header: 'Model', + accessorKey: 'model', + id: 'model' } ]; @@ -387,7 +392,12 @@ export default function SpansTable({ onRowClick }: SpansTableProps) { id: 'labels', name: 'Labels', restrictOperators: ['eq'], + }, + { + id: 'model', + name: 'Model', } + ]; return ( diff --git a/frontend/lib/clickhouse/evaluation-scores.ts b/frontend/lib/clickhouse/evaluation-scores.ts index b5bac59a..e03e35bd 100644 --- a/frontend/lib/clickhouse/evaluation-scores.ts +++ b/frontend/lib/clickhouse/evaluation-scores.ts @@ -1,7 +1,6 @@ import { ClickHouseClient } from "@clickhouse/client"; import { EvaluationTimeProgression } from "../evaluation/types"; -import { Feature, isFeatureEnabled } from "../features/features"; import { addTimeRangeToQuery, AggregationFunction, aggregationFunctionToCh, TimeRange } from "./utils"; export const getEvaluationTimeProgression = async ( @@ -11,9 +10,6 @@ export const getEvaluationTimeProgression = async ( timeRange: TimeRange, aggregationFunction: AggregationFunction, ): Promise => { - if (!isFeatureEnabled(Feature.FULL_BUILD)) { - return []; - } const query = `WITH base AS ( SELECT evaluation_id, diff --git a/frontend/lib/clickhouse/spans.ts b/frontend/lib/clickhouse/spans.ts index 930062de..48f300db 100644 --- a/frontend/lib/clickhouse/spans.ts +++ b/frontend/lib/clickhouse/spans.ts @@ -1,6 +1,5 @@ import { ClickHouseClient } from "@clickhouse/client"; -import { Feature, isFeatureEnabled } from "../features/features"; import { GroupByInterval, truncateTimeMap } from "./modifiers"; import { addTimeRangeToQuery, @@ -72,9 +71,6 @@ export const getSpanMetricsOverTime = async ( groupBy: SpanMetricGroupBy, 
aggregation: AggregationFunction, ): Promise[]> => { - if (!isFeatureEnabled(Feature.FULL_BUILD)) { - return []; - } const chRoundTime = truncateTimeMap[groupByInterval]; @@ -135,10 +131,6 @@ export const getSpanMetricsSummary = async ( groupBy: SpanMetricGroupBy, aggregation: AggregationFunction, ): Promise => { - if (!isFeatureEnabled(Feature.FULL_BUILD)) { - return []; - } - const baseQuery = ` SELECT ${groupBy}, diff --git a/frontend/lib/traces/types.ts b/frontend/lib/traces/types.ts index b5712ab6..2cd231ca 100644 --- a/frontend/lib/traces/types.ts +++ b/frontend/lib/traces/types.ts @@ -65,6 +65,7 @@ export type Span = { events: Event[]; labels: SpanLabel[]; path: string; + model?: string; }; export type TraceWithSpans = {