add multi pipeline test results, install components from remote repository
hisarbalik committed Jan 19, 2024
1 parent d0d655b commit cb24a01
Showing 2 changed files with 13 additions and 12 deletions.
10 changes: 6 additions & 4 deletions docs/contributor/performance-tests/traces/README.md
@@ -80,10 +80,12 @@ All test scenarios use a single test script [run-load-test.sh](assets/run-load-test.sh)

## Test Results

-| Test Name | Receiver Accepted Spans / sec | Exporter Exported Spans / sec | Exporter Queue Size | Pod Memory Usage (Bytes) | Pod CPU Usage |
-|:--------------------------------------:|-----------------------------------------------:|------------------------------:|--------------------:|--------------------------:|--------------:|
-| OTEL Image Version 0.91.0 Throughput | 19815.05 | 19815.05 | 0 | 137007232, 139920064 | 0.979, 0.921 |
-| OTEL Image Version 0.91.0 Backpressure | 9574.4 | 1280 | 509 | 1929478144, 1726021632 | 0.723, 0.702 |
+| Test Name | Receiver Accepted Spans / sec | Exporter Exported Spans / sec | Exporter Queue Size | Pod Memory Usage (Bytes) | Pod CPU Usage |
+|:----------------------------------------------------:|-----------------------------------------------:|---------------------------------:|--------------------:|---------------------------:|------------------:|
+| OTEL Image Version 0.91.0 Throughput | 19815.05 | 19815.05 | 0 | 137007232, 139920064 | 0.979, 0.921 |
+| OTEL Image Version 0.91.0 Backpressure | 9574.4 | 1280 | 509 | 1929478144, 1726021632 | 0.723, 0.702 |
+| OTEL Image Version 0.91.0 MultiPipeline Throughput | 13158.4 | 38929.06 | 0 | 117362688, 98566144 | 1.307, 1.351 |
+| OTEL Image Version 0.91.0 MultiPipeline Backpressure | 9663.8 | 1331.2 | 510 | 2029858816, 1686208512 | 0.733, 0.696 |
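
The two new MultiPipeline rows exercise the `trace-max-pipeline.yaml` fixture that `setup()` applies when `MAX_PIPELINE` is enabled (see the script diff below). A minimal sketch of how the four scenarios might be triggered, assuming the script inherits the `MAX_PIPELINE` and `BACKPRESSURE_TEST` switches from the environment rather than setting them from CLI flags (an assumption; check the script's argument handling):

```sh
# Hypothetical invocations; the real script may parse flags instead.
MAX_PIPELINE=false BACKPRESSURE_TEST=false ./run-load-test.sh   # Throughput
MAX_PIPELINE=false BACKPRESSURE_TEST=true  ./run-load-test.sh   # Backpressure
MAX_PIPELINE=true  BACKPRESSURE_TEST=false ./run-load-test.sh   # MultiPipeline Throughput
MAX_PIPELINE=true  BACKPRESSURE_TEST=true  ./run-load-test.sh   # MultiPipeline Backpressure
```

Note that `if "$MAX_PIPELINE"; then` executes the variable's value as a command, so the literal strings `true` and `false` are what drive the branches.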


15 changes: 7 additions & 8 deletions docs/contributor/performance-tests/traces/assets/run-load-test.sh
@@ -1,6 +1,5 @@
#!/bin/sh

-JQ_COLORS="0;90:0;39:0;39:0;39:0;32:1;39:1;39:1;34"
PROMETHEUS_NAMESPACE="prometheus"
HELM_PROM_RELEASE="prometheus"
NAMESPACE="trace-load-test"
@@ -20,16 +19,16 @@ function setup() {
# Deploy prometheus
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm repo update
-helm upgrade --install -n ${PROMETHEUS_NAMESPACE} ${HELM_PROM_RELEASE} prometheus-community/kube-prometheus-stack -f values.yaml --set grafana.adminPassword=myPwd
+helm upgrade --install -n ${PROMETHEUS_NAMESPACE} ${HELM_PROM_RELEASE} prometheus-community/kube-prometheus-stack -f https://raw.githubusercontent.com/kyma-project/telemetry-manager/main/docs/contributor/performance-tests/traces/assets/values.yaml --set grafana.adminPassword=myPwd

if "$MAX_PIPELINE"; then
-kubectl apply -f trace-max-pipeline.yaml
+kubectl apply -f https://raw.githubusercontent.com/kyma-project/telemetry-manager/main/docs/contributor/performance-tests/traces/assets/trace-max-pipeline.yaml
fi
# Deploy test setup
-kubectl apply -f trace-load-test-setup.yaml
+kubectl apply -f https://raw.githubusercontent.com/kyma-project/telemetry-manager/main/docs/contributor/performance-tests/traces/assets/trace-load-test-setup.yaml

if "$BACKPRESSURE_TEST"; then
-kubectl apply -f trace-backpressure-config.yaml
+kubectl apply -f https://raw.githubusercontent.com/kyma-project/telemetry-manager/main/docs/contributor/performance-tests/traces/assets/trace-backpressure-config.yaml
sleep 3
kubectl rollout restart deployment trace-receiver -n trace-load-test
fi
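
Fetching the values file and manifests straight from raw.githubusercontent.com means `setup()` no longer needs a local checkout of the repository, at the cost of always tracking `main`. A small sketch of how the base URL could be factored out and pinned to a fixed ref for reproducible runs (the `ASSETS` and `REF` variables are hypothetical, not part of the script):

```sh
# Pin REF to a tag or commit SHA instead of "main" for reproducible test runs.
REF="main"
ASSETS="https://raw.githubusercontent.com/kyma-project/telemetry-manager/${REF}/docs/contributor/performance-tests/traces/assets"

kubectl apply -f "${ASSETS}/trace-load-test-setup.yaml"
```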
@@ -77,15 +76,15 @@ function cleanup() {

curl -fs --data-urlencode 'query=avg(sum(otelcol_exporter_queue_size{service="telemetry-trace-collector-metrics"}))' localhost:9090/api/v1/query | jq -r '.data.result[] | [ "Exporter queue size", "Average", .value[1] ] | @csv' | xargs printf "\033[0;31m %s \033[0m \n"

-curl -fs --data-urlencode 'query=sum(container_memory_working_set_bytes{namespace="kyma-system"} * on(namespace,pod) group_left(workload) namespace_workload_pod:kube_pod_owner:relabel{namespace="kyma-system", workload="telemetry-trace-collector"}) by (pod)' localhost:9090/api/v1/query | jq -r '.data.result[] | [ "Pod memory", .metric.pod, .value[1] ] | @csv' | xargs printf "\033[0;31m %s \033[0m \n"
+curl -fs --data-urlencode 'query=sum(container_memory_working_set_bytes{namespace="kyma-system", container="collector"} * on(namespace,pod) group_left(workload) namespace_workload_pod:kube_pod_owner:relabel{namespace="kyma-system", workload="telemetry-trace-collector"}) by (pod)' localhost:9090/api/v1/query | jq -r '.data.result[] | [ "Pod memory", .metric.pod, .value[1] ] | @csv' | xargs printf "\033[0;31m %s \033[0m \n"

curl -fs --data-urlencode 'query=sum(node_namespace_pod_container:container_cpu_usage_seconds_total:sum_irate{namespace="kyma-system"} * on(namespace,pod) group_left(workload) namespace_workload_pod:kube_pod_owner:relabel{namespace="kyma-system", workload="telemetry-trace-collector"}) by (pod)' localhost:9090/api/v1/query | jq -r '.data.result[] | [ "Pod CPU", .metric.pod, .value[1] ] | @csv' | xargs printf "\033[0;31m %s \033[0m \n"
kill %1

if "$MAX_PIPELINE"; then
-kubectl delete -f trace-max-pipeline.yaml
+kubectl delete -f https://raw.githubusercontent.com/kyma-project/telemetry-manager/main/docs/contributor/performance-tests/traces/assets/trace-max-pipeline.yaml
fi
-kubectl delete -f trace-load-test-setup.yaml
+kubectl delete -f https://raw.githubusercontent.com/kyma-project/telemetry-manager/main/docs/contributor/performance-tests/traces/assets/trace-load-test-setup.yaml

helm delete -n ${PROMETHEUS_NAMESPACE} ${HELM_PROM_RELEASE}
}
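
The result columns in the README table come from `cleanup()` queries like the ones above: curl posts a PromQL expression to the Prometheus HTTP API and jq flattens each returned series into a CSV row. A standalone sketch of the pattern, assuming a port-forward to the Prometheus API is already listening on localhost:9090 (the script backgrounds one and stops it with `kill %1`):

```sh
#!/bin/sh
# Forward the Prometheus API first; the service name assumes the
# kube-prometheus-stack release is named "prometheus":
#   kubectl -n prometheus port-forward svc/prometheus-kube-prometheus-prometheus 9090 &

QUERY='avg(sum(otelcol_exporter_queue_size{service="telemetry-trace-collector-metrics"}))'

# /api/v1/query returns {"data":{"result":[{"value":[<unix_ts>,"<value>"]}, ...]}};
# jq picks the sample value out of each series and emits one CSV row per result.
curl -fs --data-urlencode "query=${QUERY}" localhost:9090/api/v1/query \
  | jq -r '.data.result[] | [ "Exporter queue size", "Average", .value[1] ] | @csv'
```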
