From ce1135633e2080817a07abba1bdad02755a82528 Mon Sep 17 00:00:00 2001 From: Ashwanth Date: Mon, 3 Feb 2025 19:24:14 +0530 Subject: [PATCH 01/33] chore(mixin): add thanos object store dashboard (#16052) --- .../loki_thanos_object_storage.json | 776 ++++++++++++++++++ .../loki_thanos_object_storage.json | 776 ++++++++++++++++++ production/loki-mixin/.lint | 5 + production/loki-mixin/dashboards.libsonnet | 3 +- .../dashboards/loki-object-store.libsonnet | 72 ++ 5 files changed, 1631 insertions(+), 1 deletion(-) create mode 100644 production/loki-mixin-compiled-ssd/dashboards/loki_thanos_object_storage.json create mode 100644 production/loki-mixin-compiled/dashboards/loki_thanos_object_storage.json create mode 100644 production/loki-mixin/dashboards/loki-object-store.libsonnet diff --git a/production/loki-mixin-compiled-ssd/dashboards/loki_thanos_object_storage.json b/production/loki-mixin-compiled-ssd/dashboards/loki_thanos_object_storage.json new file mode 100644 index 0000000000000..d81f10f9dd3a6 --- /dev/null +++ b/production/loki-mixin-compiled-ssd/dashboards/loki_thanos_object_storage.json @@ -0,0 +1,776 @@ +{ + "annotations": { + "list": [ ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "links": [ + { + "asDropdown": true, + "icon": "external link", + "includeVars": true, + "keepTime": true, + "tags": [ + "loki" + ], + "targetBlank": false, + "title": "Loki Dashboards", + "type": "dashboards" + } + ], + "refresh": "10s", + "rows": [ + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ ] + }, + "unit": "reqps" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 1, + "links": [ ], + "options": { + "legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "sum by(operation) (rate(loki_objstore_bucket_operations_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval]))", + "format": "time_series", + "legendFormat": "{{operation}}", + "legendLink": null + } + ], + "title": "RPS / operation", + "type": "timeseries" + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ ] + }, + "unit": "reqps" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 2, + "links": [ ], + "options": { + "legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "sum by(operation) (rate(loki_objstore_bucket_operation_failures_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) > 0", + "format": "time_series", + "legendFormat": "{{operation}}", + "legendLink": null + } + ], + "title": "Error rate / operation", + "type": "timeseries" + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + } + }, 
+ "thresholds": { + "mode": "absolute", + "steps": [ ] + }, + "unit": "reqps" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 3, + "links": [ ], + "options": { + "legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "sum by (method, status_code) (rate(loki_objstore_bucket_transport_requests_total{cluster=\"$cluster\", namespace=~\"$namespace\", status_code!~\"2..\"}[$__rate_interval])) > 0", + "format": "time_series", + "legendFormat": "{{method}} - {{status_code}}", + "legendLink": null + } + ], + "title": "Transport error rate / method and status code", + "type": "timeseries" + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Operations", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ ] + }, + "unit": "ms" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 4, + "links": [ ], + "nullPointMode": "null as zero", + "options": { + "legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"get\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "99th Percentile", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.50, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"get\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "50th Percentile", + "refId": "B" + }, + { + "expr": "sum(rate(loki_objstore_bucket_operation_duration_seconds_sum{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"get\"}[$__rate_interval])) * 1e3 / sum(rate(loki_objstore_bucket_operation_duration_seconds_count{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"get\"}[$__rate_interval]))", + "format": "time_series", + "legendFormat": "Average", + "refId": "C" + } + ], + "title": "Op: Get", + "type": "timeseries", + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ ] + }, + "unit": "ms" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 5, + "links": [ ], + "nullPointMode": "null as zero", + "options": { + "legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"get_range\"}[$__rate_interval])) by (le)) * 1e3", + "format": 
"time_series", + "legendFormat": "99th Percentile", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.50, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"get_range\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "50th Percentile", + "refId": "B" + }, + { + "expr": "sum(rate(loki_objstore_bucket_operation_duration_seconds_sum{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"get_range\"}[$__rate_interval])) * 1e3 / sum(rate(loki_objstore_bucket_operation_duration_seconds_count{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"get_range\"}[$__rate_interval]))", + "format": "time_series", + "legendFormat": "Average", + "refId": "C" + } + ], + "title": "Op: GetRange", + "type": "timeseries", + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ ] + }, + "unit": "ms" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 6, + "links": [ ], + "nullPointMode": "null as zero", + "options": { + "legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"exists\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "99th Percentile", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.50, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"exists\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "50th Percentile", + "refId": "B" + }, + { + "expr": "sum(rate(loki_objstore_bucket_operation_duration_seconds_sum{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"exists\"}[$__rate_interval])) * 1e3 / sum(rate(loki_objstore_bucket_operation_duration_seconds_count{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"exists\"}[$__rate_interval]))", + "format": "time_series", + "legendFormat": "Average", + "refId": "C" + } + ], + "title": "Op: Exists", + "type": "timeseries", + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ ] + }, + "unit": "ms" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 7, + 
"links": [ ], + "nullPointMode": "null as zero", + "options": { + "legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"attributes\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "99th Percentile", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.50, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"attributes\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "50th Percentile", + "refId": "B" + }, + { + "expr": "sum(rate(loki_objstore_bucket_operation_duration_seconds_sum{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"attributes\"}[$__rate_interval])) * 1e3 / sum(rate(loki_objstore_bucket_operation_duration_seconds_count{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"attributes\"}[$__rate_interval]))", + "format": "time_series", + "legendFormat": "Average", + "refId": "C" + } + ], + "title": "Op: Attributes", + "type": "timeseries", + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ ] + }, + "unit": "ms" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 8, + "links": [ ], + "nullPointMode": "null as zero", + "options": { + "legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"upload\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "99th Percentile", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.50, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"upload\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "50th Percentile", + "refId": "B" + }, + { + "expr": "sum(rate(loki_objstore_bucket_operation_duration_seconds_sum{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"upload\"}[$__rate_interval])) * 1e3 / sum(rate(loki_objstore_bucket_operation_duration_seconds_count{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"upload\"}[$__rate_interval]))", + "format": "time_series", + "legendFormat": "Average", + "refId": "C" + } + ], + "title": "Op: Upload", + "type": "timeseries", + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, 
+ "stacking": { + "group": "A", + "mode": "none" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ ] + }, + "unit": "ms" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 9, + "links": [ ], + "nullPointMode": "null as zero", + "options": { + "legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"delete\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "99th Percentile", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.50, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"delete\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "50th Percentile", + "refId": "B" + }, + { + "expr": "sum(rate(loki_objstore_bucket_operation_duration_seconds_sum{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"delete\"}[$__rate_interval])) * 1e3 / sum(rate(loki_objstore_bucket_operation_duration_seconds_count{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"delete\"}[$__rate_interval]))", + "format": "time_series", + "legendFormat": "Average", + "refId": "C" + } + ], + "title": "Op: Delete", + "type": "timeseries", + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "", + "titleSize": "h6", + "type": "row" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + "loki" + ], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": "Data source", + "name": "datasource", + "options": [ ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + "text": "prod", + "value": "prod" + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "cluster", + "multi": false, + "name": "cluster", + "options": [ ], + "query": "label_values(loki_build_info, cluster)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "text": "prod", + "value": "prod" + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "namespace", + "multi": false, + "name": "namespace", + "options": [ ], + "query": "label_values(loki_build_info{cluster=~\"$cluster\"}, namespace)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "utc", + "title": "Loki / Object Store Thanos", + "uid": "object-store", + "version": 0 + } \ No newline at end of file diff --git a/production/loki-mixin-compiled/dashboards/loki_thanos_object_storage.json 
b/production/loki-mixin-compiled/dashboards/loki_thanos_object_storage.json new file mode 100644 index 0000000000000..d81f10f9dd3a6 --- /dev/null +++ b/production/loki-mixin-compiled/dashboards/loki_thanos_object_storage.json @@ -0,0 +1,776 @@ +{ + "annotations": { + "list": [ ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "hideControls": false, + "links": [ + { + "asDropdown": true, + "icon": "external link", + "includeVars": true, + "keepTime": true, + "tags": [ + "loki" + ], + "targetBlank": false, + "title": "Loki Dashboards", + "type": "dashboards" + } + ], + "refresh": "10s", + "rows": [ + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ ] + }, + "unit": "reqps" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 1, + "links": [ ], + "options": { + "legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "sum by(operation) (rate(loki_objstore_bucket_operations_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval]))", + "format": "time_series", + "legendFormat": "{{operation}}", + "legendLink": null + } + ], + "title": "RPS / operation", + "type": "timeseries" + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ ] + }, + "unit": "reqps" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 2, + "links": [ ], + "options": { + "legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "sum by(operation) (rate(loki_objstore_bucket_operation_failures_total{cluster=\"$cluster\", namespace=~\"$namespace\"}[$__rate_interval])) > 0", + "format": "time_series", + "legendFormat": "{{operation}}", + "legendLink": null + } + ], + "title": "Error rate / operation", + "type": "timeseries" + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ ] + }, + "unit": "reqps" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 3, + "links": [ ], + "options": { + "legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "sum by (method, status_code) (rate(loki_objstore_bucket_transport_requests_total{cluster=\"$cluster\", namespace=~\"$namespace\", status_code!~\"2..\"}[$__rate_interval])) > 0", + "format": "time_series", + "legendFormat": "{{method}} - {{status_code}}", + "legendLink": null + } + ], + "title": "Transport error rate / method and status code", + "type": "timeseries" + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "Operations", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + 
"panels": [ + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ ] + }, + "unit": "ms" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 4, + "links": [ ], + "nullPointMode": "null as zero", + "options": { + "legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"get\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "99th Percentile", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.50, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"get\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "50th Percentile", + "refId": "B" + }, + { + "expr": "sum(rate(loki_objstore_bucket_operation_duration_seconds_sum{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"get\"}[$__rate_interval])) * 1e3 / sum(rate(loki_objstore_bucket_operation_duration_seconds_count{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"get\"}[$__rate_interval]))", + "format": "time_series", + "legendFormat": "Average", + "refId": "C" + } + ], + "title": "Op: Get", + "type": "timeseries", + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ ] + }, + "unit": "ms" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 5, + "links": [ ], + "nullPointMode": "null as zero", + "options": { + "legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"get_range\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "99th Percentile", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.50, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"get_range\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "50th Percentile", + "refId": "B" + }, + { + "expr": "sum(rate(loki_objstore_bucket_operation_duration_seconds_sum{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"get_range\"}[$__rate_interval])) * 1e3 / sum(rate(loki_objstore_bucket_operation_duration_seconds_count{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"get_range\"}[$__rate_interval]))", + "format": "time_series", + "legendFormat": "Average", + "refId": "C" + } + ], + "title": "Op: GetRange", + "type": "timeseries", + "yaxes": [ + { + "format": "ms", + "label": 
null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ ] + }, + "unit": "ms" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 6, + "links": [ ], + "nullPointMode": "null as zero", + "options": { + "legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"exists\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "99th Percentile", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.50, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"exists\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "50th Percentile", + "refId": "B" + }, + { + "expr": "sum(rate(loki_objstore_bucket_operation_duration_seconds_sum{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"exists\"}[$__rate_interval])) * 1e3 / sum(rate(loki_objstore_bucket_operation_duration_seconds_count{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"exists\"}[$__rate_interval]))", + "format": "time_series", + "legendFormat": "Average", + "refId": "C" + } + ], + "title": "Op: Exists", + "type": "timeseries", + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "", + "titleSize": "h6", + "type": "row" + }, + { + "collapse": false, + "collapsed": false, + "panels": [ + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ ] + }, + "unit": "ms" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 7, + "links": [ ], + "nullPointMode": "null as zero", + "options": { + "legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"attributes\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "99th Percentile", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.50, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"attributes\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "50th Percentile", + "refId": "B" + }, + { + "expr": "sum(rate(loki_objstore_bucket_operation_duration_seconds_sum{cluster=\"$cluster\", 
namespace=~\"$namespace\",operation=\"attributes\"}[$__rate_interval])) * 1e3 / sum(rate(loki_objstore_bucket_operation_duration_seconds_count{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"attributes\"}[$__rate_interval]))", + "format": "time_series", + "legendFormat": "Average", + "refId": "C" + } + ], + "title": "Op: Attributes", + "type": "timeseries", + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ ] + }, + "unit": "ms" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 8, + "links": [ ], + "nullPointMode": "null as zero", + "options": { + "legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"upload\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "99th Percentile", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.50, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"upload\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "50th Percentile", + "refId": "B" + }, + { + "expr": "sum(rate(loki_objstore_bucket_operation_duration_seconds_sum{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"upload\"}[$__rate_interval])) * 1e3 / sum(rate(loki_objstore_bucket_operation_duration_seconds_count{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"upload\"}[$__rate_interval]))", + "format": "time_series", + "legendFormat": "Average", + "refId": "C" + } + ], + "title": "Op: Upload", + "type": "timeseries", + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + }, + { + "datasource": "$datasource", + "fieldConfig": { + "defaults": { + "custom": { + "drawStyle": "line", + "fillOpacity": 10, + "lineWidth": 1, + "pointSize": 5, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + } + }, + "thresholds": { + "mode": "absolute", + "steps": [ ] + }, + "unit": "ms" + }, + "overrides": [ ] + }, + "gridPos": { }, + "id": 9, + "links": [ ], + "nullPointMode": "null as zero", + "options": { + "legend": { + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "expr": "histogram_quantile(0.99, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"delete\"}[$__rate_interval])) by (le)) * 1e3", + "format": "time_series", + "legendFormat": "99th Percentile", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.50, sum(rate(loki_objstore_bucket_operation_duration_seconds_bucket{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"delete\"}[$__rate_interval])) by (le)) * 
1e3", + "format": "time_series", + "legendFormat": "50th Percentile", + "refId": "B" + }, + { + "expr": "sum(rate(loki_objstore_bucket_operation_duration_seconds_sum{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"delete\"}[$__rate_interval])) * 1e3 / sum(rate(loki_objstore_bucket_operation_duration_seconds_count{cluster=\"$cluster\", namespace=~\"$namespace\",operation=\"delete\"}[$__rate_interval]))", + "format": "time_series", + "legendFormat": "Average", + "refId": "C" + } + ], + "title": "Op: Delete", + "type": "timeseries", + "yaxes": [ + { + "format": "ms", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ] + } + ], + "repeat": null, + "repeatIteration": null, + "repeatRowId": null, + "showTitle": true, + "title": "", + "titleSize": "h6", + "type": "row" + } + ], + "schemaVersion": 14, + "style": "dark", + "tags": [ + "loki" + ], + "templating": { + "list": [ + { + "current": { + "text": "default", + "value": "default" + }, + "hide": 0, + "label": "Data source", + "name": "datasource", + "options": [ ], + "query": "prometheus", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "allValue": null, + "current": { + "text": "prod", + "value": "prod" + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "cluster", + "multi": false, + "name": "cluster", + "options": [ ], + "query": "label_values(loki_build_info, cluster)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": { + "text": "prod", + "value": "prod" + }, + "datasource": "$datasource", + "hide": 0, + "includeAll": false, + "label": "namespace", + "multi": false, + "name": "namespace", + "options": [ ], + "query": "label_values(loki_build_info{cluster=~\"$cluster\"}, namespace)", + "refresh": 1, + "regex": "", + "sort": 2, + "tagValuesQuery": "", + "tags": [ ], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "utc", + "title": "Loki / Object Store Thanos", + "uid": "object-store", + "version": 0 + } \ No newline at end of file diff --git a/production/loki-mixin/.lint b/production/loki-mixin/.lint index d8c2ddc8b956e..31c3b84367242 100644 --- a/production/loki-mixin/.lint +++ b/production/loki-mixin/.lint @@ -14,6 +14,7 @@ exclusions: - dashboard: "Loki / Writes" - dashboard: "Loki / Bloom Build" - dashboard: "Loki / Bloom Gateway" + - dashboard: "Loki / Object Store Thanos" template-datasource-rule: reason: "Based on new convention we are using variable names prometheus_datasource and loki_datasource where as linter expects 'datasource'" entries: @@ -29,6 +30,7 @@ exclusions: - dashboard: "Loki / Writes" - dashboard: "Loki / Bloom Build" - dashboard: "Loki / Bloom Gateway" + - dashboard: "Loki / Object Store Thanos" template-instance-rule: reason: "These dashboards are cluster overview dashboards, whereas the instance refers to specific pods or nodes" entries: @@ -44,6 +46,7 @@ exclusions: - dashboard: "Loki / Writes Resources" - dashboard: "Loki / Bloom Build" - dashboard: "Loki / Bloom 
Gateway" + - dashboard: "Loki / Object Store Thanos" target-instance-rule: reason: "These dashboards are cluster overview dashboards, whereas the instance refers to specific pods or nodes" entries: @@ -59,6 +62,7 @@ exclusions: - dashboard: "Loki / Writes" - dashboard: "Loki / Bloom Build" - dashboard: "Loki / Bloom Gateway" + - dashboard: "Loki / Object Store Thanos" target-job-rule: reason: "We don't have/need a job template selector for this dashboard" entries: @@ -74,6 +78,7 @@ exclusions: - dashboard: "Loki / Writes" - dashboard: "Loki / Bloom Build" - dashboard: "Loki / Bloom Gateway" + - dashboard: "Loki / Object Store Thanos" target-promql-rule: reason: "The following are logql queries, not promql" entries: diff --git a/production/loki-mixin/dashboards.libsonnet b/production/loki-mixin/dashboards.libsonnet index cb1b5d5161778..217be8487f2f5 100644 --- a/production/loki-mixin/dashboards.libsonnet +++ b/production/loki-mixin/dashboards.libsonnet @@ -11,4 +11,5 @@ (import 'dashboards/loki-canary-dashboard.libsonnet') + (import 'dashboards/recording-rules.libsonnet') + (import 'dashboards/loki-bloom-build.libsonnet') + -(import 'dashboards/loki-bloom-gateway.libsonnet') +(import 'dashboards/loki-bloom-gateway.libsonnet') + +(import 'dashboards/loki-object-store.libsonnet') diff --git a/production/loki-mixin/dashboards/loki-object-store.libsonnet b/production/loki-mixin/dashboards/loki-object-store.libsonnet new file mode 100644 index 0000000000000..e5de659828c5f --- /dev/null +++ b/production/loki-mixin/dashboards/loki-object-store.libsonnet @@ -0,0 +1,72 @@ +local grafana = import 'grafonnet/grafana.libsonnet'; +local row = grafana.row; + +{ + grafanaDashboards+:: { + local cluster_namespace_matcher = 'cluster="$cluster", namespace=~"$namespace"', + local dashboard = ( + (import 'dashboard-utils.libsonnet') + { + _config+:: $._config, + } + ), + 'loki_thanos_object_storage.json': + dashboard.dashboard('Loki / Object Store Thanos', uid='object-store') + .addCluster() + .addNamespace() + .addTag() + .addRow( + row.new('Operations') + .addPanel( + $.newQueryPanel('RPS / operation', 'reqps') + + $.queryPanel( + 'sum by(operation) (rate(loki_objstore_bucket_operations_total{%s}[$__rate_interval]))' % cluster_namespace_matcher, + '{{operation}}' + ) + ) + .addPanel( + $.newQueryPanel('Error rate / operation', 'reqps') + + $.queryPanel( + 'sum by(operation) (rate(loki_objstore_bucket_operation_failures_total{%s}[$__rate_interval])) > 0' % cluster_namespace_matcher, + '{{operation}}' + ) + ) + .addPanel( + $.newQueryPanel('Transport error rate / method and status code', 'reqps') + + $.queryPanel( + 'sum by (method, status_code) (rate(loki_objstore_bucket_transport_requests_total{%s, status_code!~"2.."}[$__rate_interval])) > 0' % cluster_namespace_matcher, + '{{method}} - {{status_code}}' + ) + ) + ) + .addRow( + row.new('') + .addPanel( + $.newQueryPanel('Op: Get', 'ms') + + $.latencyPanel('loki_objstore_bucket_operation_duration_seconds', '{%s,operation="get"}' % cluster_namespace_matcher) + ) + .addPanel( + $.newQueryPanel('Op: GetRange', 'ms') + + $.latencyPanel('loki_objstore_bucket_operation_duration_seconds', '{%s,operation="get_range"}' % cluster_namespace_matcher) + ) + .addPanel( + $.newQueryPanel('Op: Exists', 'ms') + + $.latencyPanel('loki_objstore_bucket_operation_duration_seconds', '{%s,operation="exists"}' % cluster_namespace_matcher) + ) + ) + .addRow( + row.new('') + .addPanel( + $.newQueryPanel('Op: Attributes', 'ms') + + 
$.latencyPanel('loki_objstore_bucket_operation_duration_seconds', '{%s,operation="attributes"}' % cluster_namespace_matcher) + ) + .addPanel( + $.newQueryPanel('Op: Upload', 'ms') + + $.latencyPanel('loki_objstore_bucket_operation_duration_seconds', '{%s,operation="upload"}' % cluster_namespace_matcher) + ) + .addPanel( + $.newQueryPanel('Op: Delete', 'ms') + + $.latencyPanel('loki_objstore_bucket_operation_duration_seconds', '{%s,operation="delete"}' % cluster_namespace_matcher) + ) + ), + }, +} From bb76772f413125037fe76c98a10cd7f205c087ad Mon Sep 17 00:00:00 2001 From: George Robinson Date: Mon, 3 Feb 2025 13:58:16 +0000 Subject: [PATCH 02/33] fix: reverts the tailwindcss upgrade that was breaking builds (#16058) --- pkg/dataobj/explorer/ui/package-lock.json | 1385 ++++++++++++++++++++- pkg/dataobj/explorer/ui/package.json | 2 +- 2 files changed, 1347 insertions(+), 40 deletions(-) diff --git a/pkg/dataobj/explorer/ui/package-lock.json b/pkg/dataobj/explorer/ui/package-lock.json index be8056d5d2522..ef3e50768ae31 100644 --- a/pkg/dataobj/explorer/ui/package-lock.json +++ b/pkg/dataobj/explorer/ui/package-lock.json @@ -20,11 +20,24 @@ "@vitejs/plugin-react": "^4.2.1", "autoprefixer": "^10.4.16", "postcss": "^8.4.32", - "tailwindcss": "^4.0.0", + "tailwindcss": "^3.4.0", "typescript": "^5.2.2", "vite": "^6.0.0" } }, + "node_modules/@alloc/quick-lru": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/@alloc/quick-lru/-/quick-lru-5.2.0.tgz", + "integrity": "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/@ampproject/remapping": { "version": "2.3.0", "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz", @@ -736,6 +749,24 @@ "node": ">=18" } }, + "node_modules/@isaacs/cliui": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", + "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^5.1.2", + "string-width-cjs": "npm:string-width@^4.2.0", + "strip-ansi": "^7.0.1", + "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", + "wrap-ansi": "^8.1.0", + "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, "node_modules/@jridgewell/gen-mapping": { "version": "0.3.8", "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.8.tgz", @@ -789,6 +820,55 @@ "@jridgewell/sourcemap-codec": "^1.4.14" } }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": 
"https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@pkgjs/parseargs": { + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", + "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", + "dev": true, + "license": "MIT", + "optional": true, + "engines": { + "node": ">=14" + } + }, "node_modules/@rollup/rollup-android-arm-eabi": { "version": "4.31.0", "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.31.0.tgz", @@ -1163,6 +1243,60 @@ "vite": "^4.2.0 || ^5.0.0 || ^6.0.0" } }, + "node_modules/ansi-regex": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.1.0.tgz", + "integrity": "sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/ansi-styles": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", + "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/any-promise": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz", + "integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==", + "dev": true, + "license": "MIT" + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "license": "ISC", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/arg": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", + "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==", + "dev": true, + "license": "MIT" + }, "node_modules/autoprefixer": { "version": "10.4.20", "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.20.tgz", @@ -1201,6 +1335,49 @@ "postcss": "^8.1.0" } }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": 
"https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/browserslist": { "version": "4.24.4", "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.24.4.tgz", @@ -1234,6 +1411,16 @@ "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" } }, + "node_modules/camelcase-css": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz", + "integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, "node_modules/caniuse-lite": { "version": "1.0.30001695", "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001695.tgz", @@ -1255,6 +1442,74 @@ ], "license": "CC-BY-4.0" }, + "node_modules/chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chokidar/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true, + "license": "MIT" + }, + "node_modules/commander": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", + "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, 
"node_modules/convert-source-map": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", @@ -1271,6 +1526,34 @@ "node": ">=18" } }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "dev": true, + "license": "MIT", + "bin": { + "cssesc": "bin/cssesc" + }, + "engines": { + "node": ">=4" + } + }, "node_modules/csstype": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", @@ -1306,6 +1589,27 @@ } } }, + "node_modules/didyoumean": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.2.tgz", + "integrity": "sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/dlv": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz", + "integrity": "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==", + "dev": true, + "license": "MIT" + }, + "node_modules/eastasianwidth": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", + "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", + "dev": true, + "license": "MIT" + }, "node_modules/electron-to-chromium": { "version": "1.5.83", "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.83.tgz", @@ -1313,6 +1617,13 @@ "dev": true, "license": "ISC" }, + "node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "dev": true, + "license": "MIT" + }, "node_modules/esbuild": { "version": "0.24.2", "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.24.2.tgz", @@ -1364,6 +1675,76 @@ "node": ">=6" } }, + "node_modules/fast-glob": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz", + "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.8" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fastq": { + "version": "1.19.0", + 
"resolved": "https://registry.npmjs.org/fastq/-/fastq-1.19.0.tgz", + "integrity": "sha512-7SFSRCNjBQIZH/xZR3iy5iQYR8aGBE0h3VG6/cwlbrpdciNYBMotQav8c1XI3HjHH+NikUpP53nPdlZSdWmFzA==", + "dev": true, + "license": "ISC", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/foreground-child": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.3.0.tgz", + "integrity": "sha512-Ld2g8rrAyMYFXBhEqMz8ZAHBi4J4uS1i/CxGMDnjyFWddMXLVcDp051DZfu+t7+ab7Wv6SMqpWmyFIj5UbfFvg==", + "dev": true, + "license": "ISC", + "dependencies": { + "cross-spawn": "^7.0.0", + "signal-exit": "^4.0.1" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/fraction.js": { "version": "4.3.7", "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.3.7.tgz", @@ -1393,6 +1774,16 @@ "node": "^8.16.0 || ^10.6.0 || >=11.0.0" } }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/gensync": { "version": "1.0.0-beta.2", "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz", @@ -1403,6 +1794,40 @@ "node": ">=6.9.0" } }, + "node_modules/glob": { + "version": "10.4.5", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.4.5.tgz", + "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", + "dev": true, + "license": "ISC", + "dependencies": { + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" + }, + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, "node_modules/globals": { "version": "11.12.0", "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz", @@ -1413,61 +1838,237 @@ "node": ">=4" } }, - "node_modules/jiti": { - "version": "1.21.7", - "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.7.tgz", - "integrity": "sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==", + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", "dev": true, "license": "MIT", - "optional": true, - "peer": true, - "bin": { - "jiti": "bin/jiti.js" + "dependencies": { 
+ "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" } }, - "node_modules/js-tokens": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", - "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", "dev": true, - "license": "MIT" + "license": "MIT", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } }, - "node_modules/jsesc": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", - "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "node_modules/is-core-module": { + "version": "2.16.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", + "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", "dev": true, "license": "MIT", - "bin": { - "jsesc": "bin/jsesc" + "dependencies": { + "hasown": "^2.0.2" }, "engines": { - "node": ">=6" + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/json5": { - "version": "2.2.3", - "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", - "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", "dev": true, "license": "MIT", - "bin": { - "json5": "lib/cli.js" - }, "engines": { - "node": ">=6" + "node": ">=0.10.0" } }, - "node_modules/lru-cache": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", - "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", 
+ "dev": true, + "license": "ISC" + }, + "node_modules/jackspeak": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.4.3.tgz", + "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "@isaacs/cliui": "^8.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + }, + "optionalDependencies": { + "@pkgjs/parseargs": "^0.11.0" + } + }, + "node_modules/jiti": { + "version": "1.21.7", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.7.tgz", + "integrity": "sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==", + "dev": true, + "license": "MIT", + "bin": { + "jiti": "bin/jiti.js" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/jsesc": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "dev": true, + "license": "MIT", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true, + "license": "MIT", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/lilconfig": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.3.tgz", + "integrity": "sha512-/vlFKAoH5Cgt3Ie+JLhRbwOsCQePABiU3tJ1egGvyQ+33R/vcwM2Zl2QR/LzjsBeItPt3oSVXapn+m4nQDvpzw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/antonk52" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "dev": true, + "license": "MIT" + }, + "node_modules/lru-cache": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", "dev": true, "license": "ISC", "dependencies": { "yallist": "^3.0.2" } }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "dev": true, + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/minimatch": { + "version": "9.0.5", + 
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, "node_modules/ms": { "version": "2.1.3", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", @@ -1475,6 +2076,18 @@ "dev": true, "license": "MIT" }, + "node_modules/mz": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz", + "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "any-promise": "^1.0.0", + "object-assign": "^4.0.1", + "thenify-all": "^1.0.0" + } + }, "node_modules/nanoid": { "version": "3.3.8", "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.8.tgz", @@ -1501,6 +2114,16 @@ "dev": true, "license": "MIT" }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/normalize-range": { "version": "0.1.2", "resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz", @@ -1511,6 +2134,74 @@ "node": ">=0.10.0" } }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-hash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-3.0.0.tgz", + "integrity": "sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/package-json-from-dist": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz", + "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==", + "dev": true, + "license": "BlueOak-1.0.0" + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "dev": true, + "license": "MIT" + }, + 
"node_modules/path-scurry": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", + "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "lru-cache": "^10.2.0", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" + }, + "engines": { + "node": ">=16 || 14 >=14.18" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/path-scurry/node_modules/lru-cache": { + "version": "10.4.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", + "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", + "dev": true, + "license": "ISC" + }, "node_modules/picocolors": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", @@ -1518,6 +2209,39 @@ "dev": true, "license": "ISC" }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pify": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", + "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/pirates": { + "version": "4.0.6", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.6.tgz", + "integrity": "sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, "node_modules/postcss": { "version": "8.5.1", "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.1.tgz", @@ -1547,6 +2271,120 @@ "node": "^10 || ^12 || >=14" } }, + "node_modules/postcss-import": { + "version": "15.1.0", + "resolved": "https://registry.npmjs.org/postcss-import/-/postcss-import-15.1.0.tgz", + "integrity": "sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==", + "dev": true, + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.0.0", + "read-cache": "^1.0.0", + "resolve": "^1.1.7" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "postcss": "^8.0.0" + } + }, + "node_modules/postcss-js": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/postcss-js/-/postcss-js-4.0.1.tgz", + "integrity": "sha512-dDLF8pEO191hJMtlHFPRa8xsizHaM82MLfNkUHdUtVEV3tgTp5oj+8qbEqYM57SLfc74KSbw//4SeJma2LRVIw==", + "dev": true, + "license": "MIT", + "dependencies": { + "camelcase-css": "^2.0.1" + }, + "engines": { + "node": "^12 || ^14 || >= 16" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + "peerDependencies": { + "postcss": "^8.4.21" + } + }, + "node_modules/postcss-load-config": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-4.0.2.tgz", + "integrity": "sha512-bSVhyJGL00wMVoPUzAVAnbEoWyqRxkjv64tUl427SKnPrENtq6hJwUojroMz2VB+Q1edmi4IfrAPpami5VVgMQ==", + "dev": true, + "funding": [ + { + "type": "opencollective", + 
"url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "lilconfig": "^3.0.0", + "yaml": "^2.3.4" + }, + "engines": { + "node": ">= 14" + }, + "peerDependencies": { + "postcss": ">=8.0.9", + "ts-node": ">=9.0.0" + }, + "peerDependenciesMeta": { + "postcss": { + "optional": true + }, + "ts-node": { + "optional": true + } + } + }, + "node_modules/postcss-nested": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.2.0.tgz", + "integrity": "sha512-HQbt28KulC5AJzG+cZtj9kvKB93CFCdLvog1WFLf1D+xmMvPGlBstkpTEZfK5+AN9hfJocyBFCNiqyS48bpgzQ==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "postcss-selector-parser": "^6.1.1" + }, + "engines": { + "node": ">=12.0" + }, + "peerDependencies": { + "postcss": "^8.2.14" + } + }, + "node_modules/postcss-selector-parser": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.1.2.tgz", + "integrity": "sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg==", + "dev": true, + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, "node_modules/postcss-value-parser": { "version": "4.2.0", "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", @@ -1554,6 +2392,27 @@ "dev": true, "license": "MIT" }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, "node_modules/react": { "version": "19.0.0", "resolved": "https://registry.npmjs.org/react/-/react-19.0.0.tgz", @@ -1625,6 +2484,61 @@ "react-dom": ">=18" } }, + "node_modules/read-cache": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/read-cache/-/read-cache-1.0.0.tgz", + "integrity": "sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==", + "dev": true, + "license": "MIT", + "dependencies": { + "pify": "^2.3.0" + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/resolve": { + "version": "1.22.10", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.10.tgz", + "integrity": "sha512-NPRy+/ncIMeDlTAsuqwKIiferiawhefFJtkNSW0qZJEqMEb+qBt/77B/jGeeek+F0uOeN05CDa6HXbbIgtVX4w==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-core-module": "^2.16.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + 
"engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/reusify": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", + "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", + "dev": true, + "license": "MIT", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, "node_modules/rollup": { "version": "4.31.0", "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.31.0.tgz", @@ -1664,6 +2578,30 @@ "fsevents": "~2.3.2" } }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, "node_modules/scheduler": { "version": "0.25.0", "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.25.0.tgz", @@ -1686,6 +2624,42 @@ "integrity": "sha512-IOc8uWeOZgnb3ptbCURJWNjWUPcO3ZnTTdzsurqERrP6nPyv+paC55vJM0LpOlT2ne+Ix+9+CRG1MNLlyZ4GjQ==", "license": "MIT" }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/source-map-js": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", @@ -1696,13 +2670,227 @@ "node": ">=0.10.0" } }, - "node_modules/tailwindcss": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.0.3.tgz", - "integrity": "sha512-ImmZF0Lon5RrQpsEAKGxRvHwCvMgSC4XVlFRqmbzTEDb/3wvin9zfEZrMwgsa3yqBbPqahYcVI6lulM2S7IZAA==", + "node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/string-width-cjs": { + 
"name": "string-width", + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", "dev": true, "license": "MIT" }, + "node_modules/string-width-cjs/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/strip-ansi-cjs": { + "name": "strip-ansi", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi-cjs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/sucrase": { + "version": "3.35.0", + "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.35.0.tgz", + "integrity": "sha512-8EbVDiu9iN/nESwxeSxDKe0dunta1GOlHufmSSXxMD2z2/tMZpDMpvXQGsc+ajGo8y2uYUmixaSRUc/QPoQ0GA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.2", + "commander": "^4.0.0", + "glob": "^10.3.10", + "lines-and-columns": "^1.1.6", + "mz": "^2.7.0", + "pirates": "^4.0.1", + "ts-interface-checker": "^0.1.9" + }, + "bin": { + "sucrase": "bin/sucrase", + "sucrase-node": "bin/sucrase-node" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": 
"sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/tailwindcss": { + "version": "3.4.17", + "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.17.tgz", + "integrity": "sha512-w33E2aCvSDP0tW9RZuNXadXlkHXqFzSkQew/aIa2i/Sj8fThxwovwlXHSPXTbAHwEIhBFXAedUhP2tueAKP8Og==", + "dev": true, + "license": "MIT", + "dependencies": { + "@alloc/quick-lru": "^5.2.0", + "arg": "^5.0.2", + "chokidar": "^3.6.0", + "didyoumean": "^1.2.2", + "dlv": "^1.1.3", + "fast-glob": "^3.3.2", + "glob-parent": "^6.0.2", + "is-glob": "^4.0.3", + "jiti": "^1.21.6", + "lilconfig": "^3.1.3", + "micromatch": "^4.0.8", + "normalize-path": "^3.0.0", + "object-hash": "^3.0.0", + "picocolors": "^1.1.1", + "postcss": "^8.4.47", + "postcss-import": "^15.1.0", + "postcss-js": "^4.0.1", + "postcss-load-config": "^4.0.2", + "postcss-nested": "^6.2.0", + "postcss-selector-parser": "^6.1.2", + "resolve": "^1.22.8", + "sucrase": "^3.35.0" + }, + "bin": { + "tailwind": "lib/cli.js", + "tailwindcss": "lib/cli.js" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/thenify": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz", + "integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==", + "dev": true, + "license": "MIT", + "dependencies": { + "any-promise": "^1.0.0" + } + }, + "node_modules/thenify-all": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz", + "integrity": "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==", + "dev": true, + "license": "MIT", + "dependencies": { + "thenify": ">= 3.1.0 < 4" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/ts-interface-checker": { + "version": "0.1.13", + "resolved": "https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz", + "integrity": "sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==", + "dev": true, + "license": "Apache-2.0" + }, "node_modules/turbo-stream": { "version": "2.4.0", "resolved": "https://registry.npmjs.org/turbo-stream/-/turbo-stream-2.4.0.tgz", @@ -1761,6 +2949,13 @@ "browserslist": ">= 4.21.0" } }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "dev": true, + "license": "MIT" + }, "node_modules/vite": { "version": "6.0.11", "resolved": "https://registry.npmjs.org/vite/-/vite-6.0.11.tgz", @@ -1833,6 +3028,120 @@ } } }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "license": "ISC", + 
"dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs": { + "name": "wrap-ansi", + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/wrap-ansi-cjs/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/yallist": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz", @@ -1846,8 +3155,6 @@ "integrity": "sha512-+hSoy/QHluxmC9kCIJyL/uyFmLmc+e5CFR5Wa+bpIhIj85LVb9ZH2nVnqrHoSvKogwODv0ClqZkmiSSaIH5LTA==", "dev": true, "license": "ISC", - "optional": true, - "peer": true, "bin": { "yaml": "bin.mjs" }, diff --git 
a/pkg/dataobj/explorer/ui/package.json b/pkg/dataobj/explorer/ui/package.json index 97f030622c5a3..6d395838764e9 100644 --- a/pkg/dataobj/explorer/ui/package.json +++ b/pkg/dataobj/explorer/ui/package.json @@ -21,7 +21,7 @@ "@vitejs/plugin-react": "^4.2.1", "autoprefixer": "^10.4.16", "postcss": "^8.4.32", - "tailwindcss": "^4.0.0", + "tailwindcss": "^3.4.0", "typescript": "^5.2.2", "vite": "^6.0.0" } From cdb8569164e8d999634d2438ff813893ff4f713c Mon Sep 17 00:00:00 2001 From: Paul Rogers <129207811+paul1r@users.noreply.github.com> Date: Mon, 3 Feb 2025 09:23:26 -0500 Subject: [PATCH 03/33] chore(deps): Disable tailwind updates (#16060) --- .github/renovate.json5 | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/renovate.json5 b/.github/renovate.json5 index 494ced9f54329..d29c5cc13ca7d 100644 --- a/.github/renovate.json5 +++ b/.github/renovate.json5 @@ -36,6 +36,12 @@ "matchManagers": ["kustomize"], "enabled": false }, + { + // Disable certain npm updates for compatibility reasons + "matchManagers": ["npm"], + "matchPackageNames": ["tailwindcss"], + "enabled": false + }, { // Don't automatically merge GitHub Actions updates "matchManagers": ["github-actions"], From 78b46965bad5e197d507897729fb29e17a4ee0ae Mon Sep 17 00:00:00 2001 From: Trevor Whitney Date: Mon, 3 Feb 2025 11:42:16 -0700 Subject: [PATCH 04/33] chore: revert "fix: scheduling constraints (#16045)" (#16064) --- production/ksonnet/loki/multi-zone.libsonnet | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/production/ksonnet/loki/multi-zone.libsonnet b/production/ksonnet/loki/multi-zone.libsonnet index 1f43eb1c86d0a..a3d48f21a96d8 100644 --- a/production/ksonnet/loki/multi-zone.libsonnet +++ b/production/ksonnet/loki/multi-zone.libsonnet @@ -30,7 +30,6 @@ local rolloutOperator = import 'rollout-operator.libsonnet'; // If use_topology_spread is false, ingesters will not be scheduled on nodes already running ingesters. multi_zone_ingester_use_topology_spread: false, multi_zone_ingester_topology_spread_max_skew: 1, - multi_zone_ingester_topology_spread_when_unsatisfiable: 'ScheduleAnyway', node_selector: null, }, @@ -117,7 +116,7 @@ local rolloutOperator = import 'rollout-operator.libsonnet'; // Evenly spread queriers among available nodes. 
topologySpreadConstraints.labelSelector.withMatchLabels({ name: name }) + topologySpreadConstraints.withTopologyKey('kubernetes.io/hostname') + - topologySpreadConstraints.withWhenUnsatisfiable($._config.multi_zone_ingester_topology_spread_when_unsatisfiable) + + topologySpreadConstraints.withWhenUnsatisfiable('ScheduleAnyway') + topologySpreadConstraints.withMaxSkew($._config.multi_zone_ingester_topology_spread_max_skew), ) else {} From 9687c4f18193241db3da3cd60c70102baddb79bb Mon Sep 17 00:00:00 2001 From: Paul Rogers <129207811+paul1r@users.noreply.github.com> Date: Mon, 3 Feb 2025 13:50:01 -0500 Subject: [PATCH 05/33] chore(build): Update workflow to leverage updates to latest GH and Docker tags (#16013) --- .github/jsonnetfile.json | 2 +- .github/jsonnetfile.lock.json | 4 ++-- .../grafana/loki-release/workflows/build.libsonnet | 14 +++++++------- .../loki-release/workflows/release.libsonnet | 2 ++ .../loki-release/workflows/workflows.jsonnet | 10 +++++----- .github/workflows/minor-release-pr.yml | 10 +++++----- .github/workflows/patch-release-pr.yml | 10 +++++----- .github/workflows/release.yml | 2 ++ 8 files changed, 29 insertions(+), 25 deletions(-) diff --git a/.github/jsonnetfile.json b/.github/jsonnetfile.json index 4afd3d544d070..e725dcde236de 100644 --- a/.github/jsonnetfile.json +++ b/.github/jsonnetfile.json @@ -8,7 +8,7 @@ "subdir": "workflows" } }, - "version": "5343bc71d96dc4247021a66c3da8fd5cd4c957dd" + "version": "965213a0fe2632438ab0524d606cb71d414e2388" } ], "legacyImports": true diff --git a/.github/jsonnetfile.lock.json b/.github/jsonnetfile.lock.json index d1c33af16bb49..cf74e548f5227 100644 --- a/.github/jsonnetfile.lock.json +++ b/.github/jsonnetfile.lock.json @@ -8,8 +8,8 @@ "subdir": "workflows" } }, - "version": "5343bc71d96dc4247021a66c3da8fd5cd4c957dd", - "sum": "/+ozeV2rndtz8N3cZmrWxbNJFI7fkwoDzhECMHG1RoA=" + "version": "965213a0fe2632438ab0524d606cb71d414e2388", + "sum": "DXmqwVyytIhA0tHlMQUCLD8buVjjCb04YcIxJ3BLFqM=" } ], "legacyImports": false diff --git a/.github/vendor/github.com/grafana/loki-release/workflows/build.libsonnet b/.github/vendor/github.com/grafana/loki-release/workflows/build.libsonnet index 47f357a25ff11..519182d1d91b8 100644 --- a/.github/vendor/github.com/grafana/loki-release/workflows/build.libsonnet +++ b/.github/vendor/github.com/grafana/loki-release/workflows/build.libsonnet @@ -148,15 +148,15 @@ local runner = import 'runner.libsonnet', dockerfile='Dockerfile', context='release', platform=[ - 'linux/amd64', - 'linux/arm64', + r.forPlatform('linux/amd64'), + r.forPlatform('linux/arm64'), ] ) - job.new() + job.new('${{ matrix.runs_on }}') + job.withStrategy({ 'fail-fast': true, matrix: { - platform: platform, + include: platform, }, }) + job.withSteps([ @@ -174,9 +174,9 @@ local runner = import 'runner.libsonnet', mkdir -p images mkdir -p plugins - platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")" + platform="$(echo "${{ matrix.arch}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")" echo "platform=${platform}" >> $GITHUB_OUTPUT - echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT + echo "platform_short=$(echo ${{ matrix.arch }} | cut -d / -f 2)" >> $GITHUB_OUTPUT if [[ "${platform}" == "linux/arm64" ]]; then echo "plugin_arch=-arm64" >> $GITHUB_OUTPUT else @@ -190,7 +190,7 @@ local runner = import 'runner.libsonnet', + step.with({ context: context, file: 'release/%s/%s' % [path, dockerfile], - platforms: '${{ matrix.platform }}', + platforms: '${{ matrix.arch }}', push: false, tags: '${{ 
env.IMAGE_PREFIX }}/%s:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}' % [name], outputs: 'type=local,dest=release/plugins/%s-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}' % name, diff --git a/.github/vendor/github.com/grafana/loki-release/workflows/release.libsonnet b/.github/vendor/github.com/grafana/loki-release/workflows/release.libsonnet index d35c8a76661e8..8441bad30930f 100644 --- a/.github/vendor/github.com/grafana/loki-release/workflows/release.libsonnet +++ b/.github/vendor/github.com/grafana/loki-release/workflows/release.libsonnet @@ -182,6 +182,7 @@ local pullRequestFooter = 'Merging this PR will release the [artifacts](https:// + step.with({ imageDir: 'images', imagePrefix: '${{ env.IMAGE_PREFIX }}', + isLatest: '${{ needs.createRelease.outputs.isLatest }}', }), ] ), @@ -219,6 +220,7 @@ local pullRequestFooter = 'Merging this PR will release the [artifacts](https:// imagePrefix: '${{ env.IMAGE_PREFIX }}', isPlugin: true, buildDir: 'release/%s' % path, + isLatest: '${{ needs.createRelease.outputs.isLatest }}', }), ] ), diff --git a/.github/vendor/github.com/grafana/loki-release/workflows/workflows.jsonnet b/.github/vendor/github.com/grafana/loki-release/workflows/workflows.jsonnet index 930aa539f25c6..3329a6374368b 100644 --- a/.github/vendor/github.com/grafana/loki-release/workflows/workflows.jsonnet +++ b/.github/vendor/github.com/grafana/loki-release/workflows/workflows.jsonnet @@ -10,13 +10,13 @@ local dockerPluginDir = 'clients/cmd/docker-driver'; lokiRelease.releasePRWorkflow( imageJobs={ loki: build.image('fake-loki', 'cmd/loki'), - 'loki-docker-driver': build.dockerPlugin('loki-docker-driver', dockerPluginDir, buildImage=buildImage, platform=['linux/amd64', 'linux/arm64']), + 'loki-docker-driver': build.dockerPlugin('loki-docker-driver', dockerPluginDir, buildImage=buildImage), }, buildImage=buildImage, buildArtifactsBucket='loki-build-artifacts', branches=['release-[0-9]+.[0-9]+.x'], imagePrefix='trevorwhitney075', - releaseLibRef='release-1.14.x', + releaseLibRef='main', releaseRepo='grafana/loki-release', skipValidation=false, versioningStrategy='always-bump-patch', @@ -28,14 +28,14 @@ local dockerPluginDir = 'clients/cmd/docker-driver'; lokiRelease.releasePRWorkflow( imageJobs={ loki: build.image('fake-loki', 'cmd/loki'), - 'loki-docker-driver': build.dockerPlugin('loki-docker-driver', dockerPluginDir, buildImage=buildImage, platform=['linux/amd64', 'linux/arm64']), + 'loki-docker-driver': build.dockerPlugin('loki-docker-driver', dockerPluginDir, buildImage=buildImage), }, buildImage=buildImage, buildArtifactsBucket='loki-build-artifacts', branches=['release-[0-9]+.[0-9]+.x'], dryRun=true, imagePrefix='trevorwhitney075', - releaseLibRef='release-1.14.x', + releaseLibRef='main', releaseRepo='grafana/loki-release', skipValidation=false, versioningStrategy='always-bump-patch', @@ -54,7 +54,7 @@ local dockerPluginDir = 'clients/cmd/docker-driver'; getDockerCredsFromVault=false, imagePrefix='trevorwhitney075', pluginBuildDir=dockerPluginDir, - releaseLibRef='release-1.14.x', + releaseLibRef='main', releaseRepo='grafana/loki-release', useGitHubAppToken=true, ) + { diff --git a/.github/workflows/minor-release-pr.yml b/.github/workflows/minor-release-pr.yml index 111cc1da3bdff..7ce894025d039 100644 --- a/.github/workflows/minor-release-pr.yml +++ b/.github/workflows/minor-release-pr.yml @@ -618,7 +618,7 @@ jobs: loki-docker-driver: needs: - "version" - runs-on: "ubuntu-latest" + runs-on: "${{ matrix.runs_on 
}}" steps: - name: "pull release library code" uses: "actions/checkout@v4" @@ -649,9 +649,9 @@ jobs: mkdir -p images mkdir -p plugins - platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")" + platform="$(echo "${{ matrix.arch}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")" echo "platform=${platform}" >> $GITHUB_OUTPUT - echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT + echo "platform_short=$(echo ${{ matrix.arch }} | cut -d / -f 2)" >> $GITHUB_OUTPUT if [[ "${platform}" == "linux/arm64" ]]; then echo "plugin_arch=-arm64" >> $GITHUB_OUTPUT else @@ -670,7 +670,7 @@ jobs: context: "release" file: "release/clients/cmd/docker-driver/Dockerfile" outputs: "type=local,dest=release/plugins/loki-docker-driver-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}" - platforms: "${{ matrix.platform }}" + platforms: "${{ matrix.arch }}" push: false tags: "${{ env.IMAGE_PREFIX }}/loki-docker-driver:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}" - if: "${{ fromJSON(needs.version.outputs.pr_created) }}" @@ -689,7 +689,7 @@ jobs: strategy: fail-fast: true matrix: - platform: + include: - arch: "linux/amd64" runs_on: - "github-hosted-ubuntu-x64-small" diff --git a/.github/workflows/patch-release-pr.yml b/.github/workflows/patch-release-pr.yml index 751ad5dc51538..8f2d1f1e302ad 100644 --- a/.github/workflows/patch-release-pr.yml +++ b/.github/workflows/patch-release-pr.yml @@ -618,7 +618,7 @@ jobs: loki-docker-driver: needs: - "version" - runs-on: "ubuntu-latest" + runs-on: "${{ matrix.runs_on }}" steps: - name: "pull release library code" uses: "actions/checkout@v4" @@ -649,9 +649,9 @@ jobs: mkdir -p images mkdir -p plugins - platform="$(echo "${{ matrix.platform}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")" + platform="$(echo "${{ matrix.arch}}" | sed "s/\(.*\)\/\(.*\)/\1-\2/")" echo "platform=${platform}" >> $GITHUB_OUTPUT - echo "platform_short=$(echo ${{ matrix.platform }} | cut -d / -f 2)" >> $GITHUB_OUTPUT + echo "platform_short=$(echo ${{ matrix.arch }} | cut -d / -f 2)" >> $GITHUB_OUTPUT if [[ "${platform}" == "linux/arm64" ]]; then echo "plugin_arch=-arm64" >> $GITHUB_OUTPUT else @@ -670,7 +670,7 @@ jobs: context: "release" file: "release/clients/cmd/docker-driver/Dockerfile" outputs: "type=local,dest=release/plugins/loki-docker-driver-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}" - platforms: "${{ matrix.platform }}" + platforms: "${{ matrix.arch }}" push: false tags: "${{ env.IMAGE_PREFIX }}/loki-docker-driver:${{ needs.version.outputs.version }}-${{ steps.platform.outputs.platform_short }}" - if: "${{ fromJSON(needs.version.outputs.pr_created) }}" @@ -689,7 +689,7 @@ jobs: strategy: fail-fast: true matrix: - platform: + include: - arch: "linux/amd64" runs_on: - "github-hosted-ubuntu-x64-small" diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 426f523f80da7..67eefdbe76009 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -151,6 +151,7 @@ jobs: buildDir: "release/clients/cmd/docker-driver" imageDir: "plugins" imagePrefix: "${{ env.IMAGE_PREFIX }}" + isLatest: "${{ needs.createRelease.outputs.isLatest }}" isPlugin: true publishImages: needs: @@ -186,6 +187,7 @@ jobs: with: imageDir: "images" imagePrefix: "${{ env.IMAGE_PREFIX }}" + isLatest: "${{ needs.createRelease.outputs.isLatest }}" publishRelease: needs: - "createRelease" From dd8020917cc6081f88d5e94f46b4d87fcde7f2db Mon Sep 17 00:00:00 2001 From: 
"renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 3 Feb 2025 19:01:48 +0000 Subject: [PATCH 06/33] chore(deps): update terraform google to v6.19.0 (main) (#16068) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- tools/gcplog/main.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/gcplog/main.tf b/tools/gcplog/main.tf index 97ed5eddd5d5b..368f0865ed987 100644 --- a/tools/gcplog/main.tf +++ b/tools/gcplog/main.tf @@ -2,7 +2,7 @@ terraform { required_providers { google = { source = "hashicorp/google" - version = "6.18.1" + version = "6.19.0" } } } From 91ff737136cdaf59c857e76f72736124c24c01c3 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Mon, 3 Feb 2025 20:49:40 +0000 Subject: [PATCH 07/33] chore(deps): update terraform aws to ~> 5.85.0 (main) (#16072) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- production/terraform/modules/s3/versions.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/production/terraform/modules/s3/versions.tf b/production/terraform/modules/s3/versions.tf index de0769f7a1f35..0d1740bd76ed7 100644 --- a/production/terraform/modules/s3/versions.tf +++ b/production/terraform/modules/s3/versions.tf @@ -2,7 +2,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "~> 5.84.0" + version = "~> 5.85.0" } random = { From f2bff201d7b3608133a6e0ef3bfa4a37d5e33c67 Mon Sep 17 00:00:00 2001 From: Travis Patterson Date: Mon, 3 Feb 2025 14:13:21 -0700 Subject: [PATCH 08/33] fix: Determine when all logs have been filtered (#16073) --- pkg/distributor/http.go | 23 +++++++++--- pkg/distributor/http_test.go | 71 ++++++++++++++++++++++++++++-------- pkg/loghttp/push/push.go | 12 ++++-- 3 files changed, 80 insertions(+), 26 deletions(-) diff --git a/pkg/distributor/http.go b/pkg/distributor/http.go index 7337ce16209c4..1b0cee2a9c62a 100644 --- a/pkg/distributor/http.go +++ b/pkg/distributor/http.go @@ -1,6 +1,7 @@ package distributor import ( + "errors" "fmt" "net/http" "strings" @@ -42,16 +43,26 @@ func (d *Distributor) pushHandler(w http.ResponseWriter, r *http.Request, pushRe logPushRequestStreams := d.tenantConfigs.LogPushRequestStreams(tenantID) req, err := push.ParseRequest(logger, tenantID, r, d.tenantsRetention, d.validator.Limits, pushRequestParser, d.usageTracker, logPushRequestStreams) if err != nil { + if !errors.Is(err, push.ErrAllLogsFiltered) { + if d.tenantConfigs.LogPushRequest(tenantID) { + level.Debug(logger).Log( + "msg", "push request failed", + "code", http.StatusBadRequest, + "err", err, + ) + } + d.writeFailuresManager.Log(tenantID, fmt.Errorf("couldn't parse push request: %w", err)) + + errorWriter(w, err.Error(), http.StatusBadRequest, logger) + return + } + if d.tenantConfigs.LogPushRequest(tenantID) { level.Debug(logger).Log( - "msg", "push request failed", - "code", http.StatusBadRequest, - "err", err, + "msg", "successful push request filtered all lines", ) } - d.writeFailuresManager.Log(tenantID, fmt.Errorf("couldn't parse push request: %w", err)) - - errorWriter(w, err.Error(), http.StatusBadRequest, logger) + w.WriteHeader(http.StatusNoContent) return } diff --git a/pkg/distributor/http_test.go b/pkg/distributor/http_test.go index 8da8fc608fa98..7e1ee788994c4 100644 --- a/pkg/distributor/http_test.go +++ b/pkg/distributor/http_test.go @@ -63,27 +63,66 @@ func TestDistributorRingHandler(t *testing.T) { } func TestRequestParserWrapping(t 
*testing.T) { - limits := &validation.Limits{} - flagext.DefaultValues(limits) - limits.RejectOldSamples = false - distributors, _ := prepare(t, 1, 3, limits, nil) + t.Run("it calls the parser wrapper if there is one", func(t *testing.T) { + limits := &validation.Limits{} + flagext.DefaultValues(limits) + limits.RejectOldSamples = false + distributors, _ := prepare(t, 1, 3, limits, nil) - var called bool - distributors[0].RequestParserWrapper = func(requestParser push.RequestParser) push.RequestParser { - called = true - return requestParser - } + var called bool + distributors[0].RequestParserWrapper = func(requestParser push.RequestParser) push.RequestParser { + called = true + return requestParser + } + + ctx := user.InjectOrgID(context.Background(), "test-user") + req, err := http.NewRequestWithContext(ctx, http.MethodPost, "fake-path", nil) + require.NoError(t, err) + + rec := httptest.NewRecorder() + distributors[0].pushHandler(rec, req, newFakeParser().parseRequest, push.HTTPError) - ctx := user.InjectOrgID(context.Background(), "test-user") - req, err := http.NewRequestWithContext(ctx, http.MethodPost, "fake-path", nil) - require.NoError(t, err) + // unprocessable code because there are no streams in the request. + require.Equal(t, http.StatusUnprocessableEntity, rec.Code) + require.True(t, called) + }) + + t.Run("it returns 204 when the parser wrapper filteres all log lines", func(t *testing.T) { + limits := &validation.Limits{} + flagext.DefaultValues(limits) + limits.RejectOldSamples = false + distributors, _ := prepare(t, 1, 3, limits, nil) - distributors[0].pushHandler(httptest.NewRecorder(), req, stubParser, push.HTTPError) + var called bool + distributors[0].RequestParserWrapper = func(requestParser push.RequestParser) push.RequestParser { + called = true + return requestParser + } + + ctx := user.InjectOrgID(context.Background(), "test-user") + req, err := http.NewRequestWithContext(ctx, http.MethodPost, "fake-path", nil) + require.NoError(t, err) + + parser := newFakeParser() + parser.parseErr = push.ErrAllLogsFiltered + + rec := httptest.NewRecorder() + distributors[0].pushHandler(rec, req, parser.parseRequest, push.HTTPError) + + require.True(t, called) + require.Equal(t, http.StatusNoContent, rec.Code) + }) +} + +type fakeParser struct { + parseErr error +} - require.True(t, called) +func newFakeParser() *fakeParser { + return &fakeParser{} } -func stubParser( +func (p *fakeParser) parseRequest( _ string, _ *http.Request, _ push.TenantsRetention, @@ -92,5 +131,5 @@ func stubParser( _ bool, _ log.Logger, ) (*logproto.PushRequest, *push.Stats, error) { - return &logproto.PushRequest{}, &push.Stats{}, nil + return &logproto.PushRequest{}, &push.Stats{}, p.parseErr } diff --git a/pkg/loghttp/push/push.go b/pkg/loghttp/push/push.go index 759e21f293ede..37938fe2a8e89 100644 --- a/pkg/loghttp/push/push.go +++ b/pkg/loghttp/push/push.go @@ -11,17 +11,19 @@ import ( "time" "github.com/go-kit/log/level" + "github.com/pkg/errors" "github.com/grafana/loki/pkg/push" + "google.golang.org/grpc/codes" + grpcstatus "google.golang.org/grpc/status" + "github.com/dustin/go-humanize" "github.com/go-kit/log" "github.com/gogo/protobuf/proto" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/prometheus/model/labels" - "google.golang.org/grpc/codes" - grpcstatus "google.golang.org/grpc/status" "github.com/grafana/loki/v3/pkg/analytics" "github.com/grafana/loki/v3/pkg/loghttp" @@ -66,6 +68,8 @@ const ( 
AggregatedMetricLabel = "__aggregated_metric__" ) +var ErrAllLogsFiltered = errors.New("all logs lines filtered during parsing") + type TenantsRetention interface { RetentionPeriodFor(userID string, lbs labels.Labels) time.Duration } @@ -111,7 +115,7 @@ type Stats struct { func ParseRequest(logger log.Logger, userID string, r *http.Request, tenantsRetention TenantsRetention, limits Limits, pushRequestParser RequestParser, tracker UsageTracker, logPushRequestStreams bool) (*logproto.PushRequest, error) { req, pushStats, err := pushRequestParser(userID, r, tenantsRetention, limits, tracker, logPushRequestStreams, logger) - if err != nil { + if err != nil && !errors.Is(err, ErrAllLogsFiltered) { return nil, err } @@ -164,7 +168,7 @@ func ParseRequest(logger log.Logger, userID string, r *http.Request, tenantsRete logValues = append(logValues, pushStats.Extra...) level.Debug(logger).Log(logValues...) - return req, nil + return req, err } func ParseLokiRequest(userID string, r *http.Request, tenantsRetention TenantsRetention, limits Limits, tracker UsageTracker, logPushRequestStreams bool, logger log.Logger) (*logproto.PushRequest, *Stats, error) { From 439977573da02f072ab37632ff60a12ad216605c Mon Sep 17 00:00:00 2001 From: Owen Diehl Date: Mon, 3 Feb 2025 14:52:54 -0800 Subject: [PATCH 09/33] feat(dataobj): tenant partition consumer (#16065) --- docs/sources/shared/configuration.md | 4 --- pkg/dataobj/builder.go | 1 + pkg/dataobj/consumer/config.go | 6 ----- pkg/dataobj/consumer/partition_processor.go | 29 ++++++++++++++++++--- pkg/dataobj/consumer/service.go | 23 +++++++++------- pkg/kafka/partitionring/consumer/client.go | 1 + pkg/loki/modules.go | 1 + 7 files changed, 41 insertions(+), 24 deletions(-) diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md index 46d079c7477dd..035c550db7eeb 100644 --- a/docs/sources/shared/configuration.md +++ b/docs/sources/shared/configuration.md @@ -808,10 +808,6 @@ dataobj_consumer: # CLI flag: -dataobj-consumer.buffer-size [buffer_size: | default = 16MiB] - # The tenant ID to use for the data object builder. - # CLI flag: -dataobj-consumer.tenant-id - [tenant_id: | default = "fake"] - # The prefix to use for the storage bucket. # CLI flag: -dataobj-consumer.storage-bucket-prefix [storage_bucket_prefix: | default = "dataobj/"] diff --git a/pkg/dataobj/builder.go b/pkg/dataobj/builder.go index a80591547b6aa..87f668bc4fd2c 100644 --- a/pkg/dataobj/builder.go +++ b/pkg/dataobj/builder.go @@ -375,6 +375,7 @@ func (b *Builder) Reset() { // reg must contain additional labels to differentiate between them. func (b *Builder) RegisterMetrics(reg prometheus.Registerer) error { reg = prometheus.WrapRegistererWith(prometheus.Labels{"tenant": b.tenantID}, reg) + return b.metrics.Register(reg) } diff --git a/pkg/dataobj/consumer/config.go b/pkg/dataobj/consumer/config.go index c873eb59902bb..c62ae612193cb 100644 --- a/pkg/dataobj/consumer/config.go +++ b/pkg/dataobj/consumer/config.go @@ -1,7 +1,6 @@ package consumer import ( - "errors" "flag" "github.com/grafana/loki/v3/pkg/dataobj" @@ -9,15 +8,11 @@ import ( type Config struct { dataobj.BuilderConfig - TenantID string `yaml:"tenant_id"` // StorageBucketPrefix is the prefix to use for the storage bucket. 
StorageBucketPrefix string `yaml:"storage_bucket_prefix"` } func (cfg *Config) Validate() error { - if cfg.TenantID == "" { - return errors.New("tenantID is required") - } return cfg.BuilderConfig.Validate() } @@ -27,6 +22,5 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { cfg.BuilderConfig.RegisterFlagsWithPrefix(prefix, f) - f.StringVar(&cfg.TenantID, prefix+"tenant-id", "fake", "The tenant ID to use for the data object builder.") f.StringVar(&cfg.StorageBucketPrefix, prefix+"storage-bucket-prefix", "dataobj/", "The prefix to use for the storage bucket.") } diff --git a/pkg/dataobj/consumer/partition_processor.go b/pkg/dataobj/consumer/partition_processor.go index 0c5cdb1346228..0033e04c640f5 100644 --- a/pkg/dataobj/consumer/partition_processor.go +++ b/pkg/dataobj/consumer/partition_processor.go @@ -46,14 +46,16 @@ type partitionProcessor struct { logger log.Logger } -func newPartitionProcessor(ctx context.Context, client *kgo.Client, builderCfg dataobj.BuilderConfig, bucket objstore.Bucket, tenantID string, topic string, partition int32, logger log.Logger, reg prometheus.Registerer) *partitionProcessor { +func newPartitionProcessor(ctx context.Context, client *kgo.Client, builderCfg dataobj.BuilderConfig, bucket objstore.Bucket, tenantID string, virtualShard int32, topic string, partition int32, logger log.Logger, reg prometheus.Registerer) *partitionProcessor { ctx, cancel := context.WithCancel(ctx) decoder, err := kafka.NewDecoder() if err != nil { panic(err) } reg = prometheus.WrapRegistererWith(prometheus.Labels{ + "shard": strconv.Itoa(int(virtualShard)), "partition": strconv.Itoa(int(partition)), + "topic": topic, }, reg) metrics := newPartitionOffsetMetrics() @@ -70,7 +72,7 @@ func newPartitionProcessor(ctx context.Context, client *kgo.Client, builderCfg d return &partitionProcessor{ client: client, - logger: log.With(logger, "topic", topic, "partition", partition), + logger: log.With(logger, "topic", topic, "partition", partition, "tenant", tenantID), topic: topic, partition: partition, records: make(chan *kgo.Record, 1000), @@ -90,7 +92,6 @@ func (p *partitionProcessor) start() { p.wg.Add(1) go func() { defer p.wg.Done() - defer close(p.records) level.Info(p.logger).Log("msg", "started partition processor") for { @@ -98,7 +99,11 @@ func (p *partitionProcessor) start() { case <-p.ctx.Done(): level.Info(p.logger).Log("msg", "stopping partition processor") return - case record := <-p.records: + case record, ok := <-p.records: + if !ok { + // Channel was closed + return + } p.processRecord(record) } } @@ -114,6 +119,21 @@ func (p *partitionProcessor) stop() { p.metrics.unregister(p.reg) } +// Drops records from the channel if the processor is stopped. +// Returns false if the processor is stopped, true otherwise. +func (p *partitionProcessor) Append(records []*kgo.Record) bool { + for _, record := range records { + select { + // must check per-record in order to not block on a full channel + // after receiver has been stopped. 
+ case <-p.ctx.Done(): + return false + case p.records <- record: + } + } + return true +} + func (p *partitionProcessor) initBuilder() error { var initErr error p.builderOnce.Do(func() { @@ -146,6 +166,7 @@ func (p *partitionProcessor) processRecord(record *kgo.Record) { // todo: handle multi-tenant if !bytes.Equal(record.Key, p.tenantID) { + level.Error(p.logger).Log("msg", "record key does not match tenant ID", "key", record.Key, "tenant_id", p.tenantID) return } stream, err := p.decoder.DecodeWithoutLabels(record.Value) diff --git a/pkg/dataobj/consumer/service.go b/pkg/dataobj/consumer/service.go index 44529ca749435..aee69dbd0dfd3 100644 --- a/pkg/dataobj/consumer/service.go +++ b/pkg/dataobj/consumer/service.go @@ -15,6 +15,7 @@ import ( "github.com/thanos-io/objstore" "github.com/twmb/franz-go/pkg/kgo" + "github.com/grafana/loki/v3/pkg/distributor" "github.com/grafana/loki/v3/pkg/kafka" "github.com/grafana/loki/v3/pkg/kafka/client" "github.com/grafana/loki/v3/pkg/kafka/partitionring/consumer" @@ -33,13 +34,14 @@ type Service struct { cfg Config bucket objstore.Bucket + codec distributor.TenantPrefixCodec // Partition management partitionMtx sync.RWMutex partitionHandlers map[string]map[int32]*partitionProcessor } -func New(kafkaCfg kafka.Config, cfg Config, bucket objstore.Bucket, instanceID string, partitionRing ring.PartitionRingReader, reg prometheus.Registerer, logger log.Logger) *Service { +func New(kafkaCfg kafka.Config, cfg Config, topicPrefix string, bucket objstore.Bucket, instanceID string, partitionRing ring.PartitionRingReader, reg prometheus.Registerer, logger log.Logger) *Service { if cfg.StorageBucketPrefix != "" { bucket = objstore.NewPrefixedBucket(bucket, cfg.StorageBucketPrefix) } @@ -47,6 +49,7 @@ func New(kafkaCfg kafka.Config, cfg Config, bucket objstore.Bucket, instanceID s logger: log.With(logger, "component", groupName), cfg: cfg, bucket: bucket, + codec: distributor.TenantPrefixCodec(topicPrefix), partitionHandlers: make(map[string]map[int32]*partitionProcessor), reg: reg, } @@ -80,12 +83,19 @@ func (s *Service) handlePartitionsAssigned(ctx context.Context, client *kgo.Clie defer s.partitionMtx.Unlock() for topic, parts := range partitions { + tenant, virtualShard, err := s.codec.Decode(topic) + // TODO: should propage more effectively + if err != nil { + level.Error(s.logger).Log("msg", "failed to decode topic", "topic", topic, "err", err) + continue + } + if _, ok := s.partitionHandlers[topic]; !ok { s.partitionHandlers[topic] = make(map[int32]*partitionProcessor) } for _, partition := range parts { - processor := newPartitionProcessor(ctx, client, s.cfg.BuilderConfig, s.bucket, s.cfg.TenantID, topic, partition, s.logger, s.reg) + processor := newPartitionProcessor(ctx, client, s.cfg.BuilderConfig, s.bucket, tenant, virtualShard, topic, partition, s.logger, s.reg) s.partitionHandlers[topic][partition] = processor processor.start() } @@ -155,14 +165,7 @@ func (s *Service) run(ctx context.Context) error { return } - for _, record := range records { - select { - case <-processor.ctx.Done(): - return - case processor.records <- record: - // Record sent successfully - } - } + _ = processor.Append(records) }) } } diff --git a/pkg/kafka/partitionring/consumer/client.go b/pkg/kafka/partitionring/consumer/client.go index 2e218949f9094..8790f12441260 100644 --- a/pkg/kafka/partitionring/consumer/client.go +++ b/pkg/kafka/partitionring/consumer/client.go @@ -37,6 +37,7 @@ type Client struct { func NewGroupClient(kafkaCfg kafka.Config, partitionRing 
ring.PartitionRingReader, groupName string, metrics *kprom.Metrics, logger log.Logger, opts ...kgo.Opt) (*Client, error) { defaultOpts := []kgo.Opt{ kgo.ConsumerGroup(groupName), + kgo.ConsumeRegex(), kgo.ConsumeTopics(kafkaCfg.Topic), kgo.Balancers(NewCooperativeActiveStickyBalancer(partitionRing)), kgo.ConsumeResetOffset(kgo.NewOffset().AtStart()), diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index 0c79a98b2d422..540b25efe9f59 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -1923,6 +1923,7 @@ func (t *Loki) initDataObjConsumer() (services.Service, error) { t.dataObjConsumer = consumer.New( t.Cfg.KafkaConfig, t.Cfg.DataObjConsumer, + t.Cfg.Distributor.TenantTopic.TopicPrefix, store, t.Cfg.Ingester.LifecyclerConfig.ID, t.partitionRing, From 2ec948c1f28c57fb542e96218b41727301445632 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 4 Feb 2025 03:50:37 +0000 Subject: [PATCH 10/33] fix(deps): update module go.opentelemetry.io/collector/pdata to v1.25.0 (main) (#16076) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- .../go.opentelemetry.io/collector/pdata/pcommon/timestamp.go | 4 ++-- vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go | 4 ++-- vendor/modules.txt | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/go.mod b/go.mod index 9d725c11bc143..f6b7dedfb55b7 100644 --- a/go.mod +++ b/go.mod @@ -143,7 +143,7 @@ require ( github.com/twmb/franz-go/plugin/kotel v1.5.0 github.com/twmb/franz-go/plugin/kprom v1.1.0 github.com/willf/bloom v2.0.3+incompatible - go.opentelemetry.io/collector/pdata v1.24.0 + go.opentelemetry.io/collector/pdata v1.25.0 go4.org/netipx v0.0.0-20230125063823-8449b0a6169f golang.org/x/oauth2 v0.25.0 golang.org/x/text v0.21.0 diff --git a/go.sum b/go.sum index 65f7198d167f9..ce878f9db4506 100644 --- a/go.sum +++ b/go.sum @@ -1207,8 +1207,8 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/collector/pdata v1.24.0 h1:D6j92eAzmAbQgivNBUnt8r9juOl8ugb+ihYynoFZIEg= -go.opentelemetry.io/collector/pdata v1.24.0/go.mod h1:cf3/W9E/uIvPS4MR26SnMFJhraUCattzzM6qusuONuc= +go.opentelemetry.io/collector/pdata v1.25.0 h1:AmgBklQfbfy0lT8qsoJtRuYMZ7ZV3VZvkvhjSDentrg= +go.opentelemetry.io/collector/pdata v1.25.0/go.mod h1:Zs7D4RXOGS7E2faGc/jfWdbmhoiHBxA7QbpuJOioxq8= go.opentelemetry.io/collector/semconv v0.108.1 h1:Txk9tauUnamZaxS5vlf1O0uZ4VD6nioRBR0nX8L/fU4= go.opentelemetry.io/collector/semconv v0.108.1/go.mod h1:zCJ5njhWpejR+A40kiEoeFm1xq1uzyZwMnRNX6/D82A= go.opentelemetry.io/contrib/detectors/gcp v1.33.0 h1:FVPoXEoILwgbZUu4X7YSgsESsAmGRgoYcnXkzgQPhP4= diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/timestamp.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/timestamp.go index 666f86f43f649..037213a0cafb9 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/timestamp.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/timestamp.go @@ -13,13 +13,13 @@ type Timestamp uint64 // NewTimestampFromTime constructs a new Timestamp from the provided time.Time. 
func NewTimestampFromTime(t time.Time) Timestamp { - // nolint:gosec + //nolint:gosec return Timestamp(uint64(t.UnixNano())) } // AsTime converts this to a time.Time. func (ts Timestamp) AsTime() time.Time { - // nolint:gosec + //nolint:gosec return time.Unix(0, int64(ts)).UTC() } diff --git a/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go b/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go index ad2e1c7ae476a..73a95bcf2e280 100644 --- a/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go +++ b/vendor/go.opentelemetry.io/collector/pdata/pcommon/value.go @@ -148,7 +148,7 @@ func (v Value) FromRaw(iv any) error { case int64: v.SetInt(tv) case uint: - // nolint:gosec + //nolint:gosec v.SetInt(int64(tv)) case uint8: v.SetInt(int64(tv)) @@ -157,7 +157,7 @@ func (v Value) FromRaw(iv any) error { case uint32: v.SetInt(int64(tv)) case uint64: - // nolint:gosec + //nolint:gosec v.SetInt(int64(tv)) case float32: v.SetDouble(float64(tv)) diff --git a/vendor/modules.txt b/vendor/modules.txt index 5df67307d1534..4f92f6e5f9ae5 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1782,7 +1782,7 @@ go.opencensus.io/tag ## explicit; go 1.22.0 go.opentelemetry.io/auto/sdk go.opentelemetry.io/auto/sdk/internal/telemetry -# go.opentelemetry.io/collector/pdata v1.24.0 +# go.opentelemetry.io/collector/pdata v1.25.0 ## explicit; go 1.22.0 go.opentelemetry.io/collector/pdata/internal go.opentelemetry.io/collector/pdata/internal/data From 3a02d64a04ffe53f11008b66919e62644401240b Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 4 Feb 2025 06:51:13 +0000 Subject: [PATCH 11/33] chore(deps): update dependency @types/node to v22.13.1 (main) (#16077) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- pkg/dataobj/explorer/ui/package-lock.json | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/dataobj/explorer/ui/package-lock.json b/pkg/dataobj/explorer/ui/package-lock.json index ef3e50768ae31..37f8b3267a699 100644 --- a/pkg/dataobj/explorer/ui/package-lock.json +++ b/pkg/dataobj/explorer/ui/package-lock.json @@ -1194,9 +1194,9 @@ "license": "MIT" }, "node_modules/@types/node": { - "version": "22.13.0", - "resolved": "https://registry.npmjs.org/@types/node/-/node-22.13.0.tgz", - "integrity": "sha512-ClIbNe36lawluuvq3+YYhnIN2CELi+6q8NpnM7PYp4hBn/TatfboPgVSm2rwKRfnV2M+Ty9GWDFI64KEe+kysA==", + "version": "22.13.1", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.13.1.tgz", + "integrity": "sha512-jK8uzQlrvXqEU91UxiK5J7pKHyzgnI1Qnl0QDHIgVGuolJhRb9EEl28Cj9b3rGR8B2lhFCtvIm5os8lFnO/1Ew==", "dev": true, "license": "MIT", "dependencies": { From 388886661699dfbe4fe9826dfecd33107fd3383a Mon Sep 17 00:00:00 2001 From: Christian Haudum Date: Tue, 4 Feb 2025 08:50:45 +0100 Subject: [PATCH 12/33] fix(engine): Fix `Walk()` function implementation on various `Expr` implementations (#16033) The `Walk(f WalkFn)` implementation expects to first visit the current node and then invoke `Walk(f)` on all its children if they are not `nil`. This also fixes the `checkIntervalLimit(syntax.SampleExpr, time.Duration)` function, which did not visit the expression if it was a `*ConcatSampleExpr`. 
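For reference, a minimal self-contained sketch of the visiting convention described above: visit the current node first, then recurse into each non-nil child. WalkFn and the ordering follow the syntax package; binaryExpr and leafExpr are hypothetical stand-ins for the real AST nodes.

package main

import "fmt"

// WalkFn mirrors pkg/logql/syntax: a callback invoked once per visited node.
type WalkFn = func(e Expr)

type Expr interface {
	Walk(f WalkFn)
}

// binaryExpr is a hypothetical two-child node used only for illustration.
type binaryExpr struct {
	Left, Right Expr
}

func (e *binaryExpr) Walk(f WalkFn) {
	f(e) // visit self first
	if e.Left != nil {
		e.Left.Walk(f)
	}
	if e.Right != nil {
		e.Right.Walk(f)
	}
}

type leafExpr struct{}

func (e *leafExpr) Walk(f WalkFn) { f(e) }

func main() {
	root := &binaryExpr{Left: &leafExpr{}, Right: &binaryExpr{Left: &leafExpr{}}}
	count := 0
	root.Walk(func(Expr) { count++ })
	fmt.Println(count) // 4: each node is visited exactly once, nil children are skipped
}
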
Signed-off-by: Christian Haudum --- pkg/logql/downstream.go | 33 +++++++++++++++-- pkg/logql/engine.go | 7 ++-- pkg/logql/engine_test.go | 39 ++++++++++++++++++++ pkg/logql/syntax/ast.go | 46 ++++++++++++------------ pkg/logql/syntax/walk.go | 6 ---- pkg/logql/syntax/walk_test.go | 4 +-- pkg/querier/queryrange/roundtrip_test.go | 2 +- 7 files changed, 97 insertions(+), 40 deletions(-) diff --git a/pkg/logql/downstream.go b/pkg/logql/downstream.go index 78af117c80518..c52d7dea6a043 100644 --- a/pkg/logql/downstream.go +++ b/pkg/logql/downstream.go @@ -139,7 +139,12 @@ func (d DownstreamLogSelectorExpr) Pretty(level int) string { return s } -func (d DownstreamSampleExpr) Walk(f syntax.WalkFn) { f(d) } +func (d DownstreamSampleExpr) Walk(f syntax.WalkFn) { + f(d) + if d.SampleExpr != nil { + d.SampleExpr.Walk(f) + } +} var defaultMaxDepth = 4 @@ -173,7 +178,12 @@ func (c *ConcatSampleExpr) string(maxDepth int) string { func (c *ConcatSampleExpr) Walk(f syntax.WalkFn) { f(c) - f(c.next) + if c.SampleExpr != nil { + c.SampleExpr.Walk(f) + } + if c.next != nil { + c.next.Walk(f) + } } // ConcatSampleExpr has no LogQL repretenstation. It is expressed in in the @@ -271,7 +281,12 @@ func (e QuantileSketchEvalExpr) String() string { func (e *QuantileSketchEvalExpr) Walk(f syntax.WalkFn) { f(e) - e.quantileMergeExpr.Walk(f) + if e.SampleExpr != nil { + e.SampleExpr.Walk(f) + } + if e.quantileMergeExpr != nil { + e.quantileMergeExpr.Walk(f) + } } type QuantileSketchMergeExpr struct { @@ -297,6 +312,9 @@ func (e QuantileSketchMergeExpr) String() string { func (e *QuantileSketchMergeExpr) Walk(f syntax.WalkFn) { f(e) + if e.SampleExpr != nil { + e.SampleExpr.Walk(f) + } for _, d := range e.downstreams { d.Walk(f) } @@ -326,6 +344,9 @@ func (e MergeFirstOverTimeExpr) String() string { func (e *MergeFirstOverTimeExpr) Walk(f syntax.WalkFn) { f(e) + if e.SampleExpr != nil { + e.SampleExpr.Walk(f) + } for _, d := range e.downstreams { d.Walk(f) } @@ -355,6 +376,9 @@ func (e MergeLastOverTimeExpr) String() string { func (e *MergeLastOverTimeExpr) Walk(f syntax.WalkFn) { f(e) + if e.SampleExpr != nil { + e.SampleExpr.Walk(f) + } for _, d := range e.downstreams { d.Walk(f) } @@ -383,6 +407,9 @@ func (e CountMinSketchEvalExpr) String() string { func (e *CountMinSketchEvalExpr) Walk(f syntax.WalkFn) { f(e) + if e.SampleExpr != nil { + e.SampleExpr.Walk(f) + } for _, d := range e.downstreams { d.Walk(f) } diff --git a/pkg/logql/engine.go b/pkg/logql/engine.go index 3d7cb541bb9dc..c3561e75184fa 100644 --- a/pkg/logql/engine.go +++ b/pkg/logql/engine.go @@ -489,11 +489,10 @@ func (q *query) checkIntervalLimit(expr syntax.SampleExpr, limit time.Duration) var err error expr.Walk(func(e syntax.Expr) { switch e := e.(type) { - case *syntax.RangeAggregationExpr: - if e.Left == nil || e.Left.Interval <= limit { - return + case *syntax.LogRange: + if e.Interval > limit { + err = fmt.Errorf("%w: [%s] > [%s]", logqlmodel.ErrIntervalLimit, model.Duration(e.Interval), model.Duration(limit)) } - err = fmt.Errorf("%w: [%s] > [%s]", logqlmodel.ErrIntervalLimit, model.Duration(e.Left.Interval), model.Duration(limit)) } }) return err diff --git a/pkg/logql/engine_test.go b/pkg/logql/engine_test.go index 889d06344ddbe..8a13e246f6386 100644 --- a/pkg/logql/engine_test.go +++ b/pkg/logql/engine_test.go @@ -38,6 +38,45 @@ var ( ErrMockMultiple = util.MultiError{ErrMock, ErrMock} ) +func TestEngine_checkIntervalLimit(t *testing.T) { + q := &query{} + for _, tc := range []struct { + query string + expErr string + }{ + {query: 
`rate({app="foo"} [1m])`, expErr: ""}, + {query: `rate({app="foo"} [10m])`, expErr: ""}, + {query: `max(rate({app="foo"} [5m])) - max(rate({app="bar"} [10m]))`, expErr: ""}, + {query: `rate({app="foo"} [5m]) - rate({app="bar"} [15m])`, expErr: "[15m] > [10m]"}, + {query: `rate({app="foo"} [1h])`, expErr: "[1h] > [10m]"}, + {query: `sum(rate({app="foo"} [1h]))`, expErr: "[1h] > [10m]"}, + {query: `sum_over_time({app="foo"} |= "foo" | json | unwrap bar [1h])`, expErr: "[1h] > [10m]"}, + } { + for _, downstream := range []bool{true, false} { + t.Run(fmt.Sprintf("%v/downstream=%v", tc.query, downstream), func(t *testing.T) { + expr := syntax.MustParseExpr(tc.query).(syntax.SampleExpr) + if downstream { + // Simulate downstream expression + expr = &ConcatSampleExpr{ + DownstreamSampleExpr: DownstreamSampleExpr{ + shard: nil, + SampleExpr: expr, + }, + next: nil, + } + } + err := q.checkIntervalLimit(expr, 10*time.Minute) + if tc.expErr != "" { + require.ErrorContains(t, err, tc.expErr) + } else { + require.NoError(t, err) + } + }) + } + + } +} + func TestEngine_LogsRateUnwrap(t *testing.T) { t.Parallel() for _, test := range []struct { diff --git a/pkg/logql/syntax/ast.go b/pkg/logql/syntax/ast.go index 29cb5e548ddd6..33959098d8e24 100644 --- a/pkg/logql/syntax/ast.go +++ b/pkg/logql/syntax/ast.go @@ -341,16 +341,12 @@ func (e *PipelineExpr) Shardable(topLevel bool) bool { func (e *PipelineExpr) Walk(f WalkFn) { f(e) - if e.Left == nil { - return + if e.Left != nil { + e.Left.Walk(f) } - - xs := make([]Walkable, 0, len(e.MultiStages)+1) - xs = append(xs, e.Left) for _, p := range e.MultiStages { - xs = append(xs, p) + p.Walk(f) } - walkAll(f, xs...) } func (e *PipelineExpr) Accept(v RootVisitor) { v.VisitPipeline(e) } @@ -501,10 +497,12 @@ func (*LineFilterExpr) isStageExpr() {} func (e *LineFilterExpr) Walk(f WalkFn) { f(e) - if e.Left == nil { - return + if e.Left != nil { + e.Left.Walk(f) + } + if e.Or != nil { + e.Or.Walk(f) } - e.Left.Walk(f) } func (e *LineFilterExpr) Accept(v RootVisitor) { @@ -1153,10 +1151,9 @@ func (r *LogRange) Shardable(topLevel bool) bool { return r.Left.Shardable(topLe func (r *LogRange) Walk(f WalkFn) { f(r) - if r.Left == nil { - return + if r.Left != nil { + r.Left.Walk(f) } - r.Left.Walk(f) } func (r *LogRange) Accept(v RootVisitor) { @@ -1476,10 +1473,9 @@ func (e *RangeAggregationExpr) Shardable(topLevel bool) bool { func (e *RangeAggregationExpr) Walk(f WalkFn) { f(e) - if e.Left == nil { - return + if e.Left != nil { + e.Left.Walk(f) } - e.Left.Walk(f) } func (e *RangeAggregationExpr) Accept(v RootVisitor) { v.VisitRangeAggregation(e) } @@ -1686,10 +1682,9 @@ func (e *VectorAggregationExpr) Shardable(topLevel bool) bool { func (e *VectorAggregationExpr) Walk(f WalkFn) { f(e) - if e.Left == nil { - return + if e.Left != nil { + e.Left.Walk(f) } - e.Left.Walk(f) } func (e *VectorAggregationExpr) Accept(v RootVisitor) { v.VisitVectorAggregation(e) } @@ -1806,7 +1801,13 @@ func (e *BinOpExpr) Shardable(topLevel bool) bool { } func (e *BinOpExpr) Walk(f WalkFn) { - walkAll(f, e.SampleExpr, e.RHS) + f(e) + if e.SampleExpr != nil { + e.SampleExpr.Walk(f) + } + if e.RHS != nil { + e.RHS.Walk(f) + } } func (e *BinOpExpr) Accept(v RootVisitor) { v.VisitBinOp(e) } @@ -2235,10 +2236,9 @@ func (e *LabelReplaceExpr) Shardable(_ bool) bool { func (e *LabelReplaceExpr) Walk(f WalkFn) { f(e) - if e.Left == nil { - return + if e.Left != nil { + e.Left.Walk(f) } - e.Left.Walk(f) } func (e *LabelReplaceExpr) Accept(v RootVisitor) { v.VisitLabelReplace(e) } diff --git 
a/pkg/logql/syntax/walk.go b/pkg/logql/syntax/walk.go index 291ec8b31036f..c728c55c1a38d 100644 --- a/pkg/logql/syntax/walk.go +++ b/pkg/logql/syntax/walk.go @@ -2,12 +2,6 @@ package syntax type WalkFn = func(e Expr) -func walkAll(f WalkFn, xs ...Walkable) { - for _, x := range xs { - x.Walk(f) - } -} - type Walkable interface { Walk(f WalkFn) } diff --git a/pkg/logql/syntax/walk_test.go b/pkg/logql/syntax/walk_test.go index ee536e969471f..9f0a5015ed731 100644 --- a/pkg/logql/syntax/walk_test.go +++ b/pkg/logql/syntax/walk_test.go @@ -22,7 +22,7 @@ func Test_Walkable(t *testing.T) { { desc: "bin op query", expr: `(sum by(cluster)(rate({job="foo"} |= "bar" | logfmt | bazz="buzz"[5m])) / sum by(cluster)(rate({job="foo"} |= "bar" | logfmt | bazz="buzz"[5m])))`, - want: 16, + want: 17, }, } for _, test := range tests { @@ -79,8 +79,6 @@ func Test_AppendMatchers(t *testing.T) { switch me := e.(type) { case *MatchersExpr: me.AppendMatchers(test.matchers) - default: - // Do nothing } }) require.Equal(t, test.want, expr.String()) diff --git a/pkg/querier/queryrange/roundtrip_test.go b/pkg/querier/queryrange/roundtrip_test.go index 8aa66e8419e61..bafe57096de98 100644 --- a/pkg/querier/queryrange/roundtrip_test.go +++ b/pkg/querier/queryrange/roundtrip_test.go @@ -1464,7 +1464,7 @@ func (f fakeLimits) MaxQueryLength(context.Context, string) time.Duration { } func (f fakeLimits) MaxQueryRange(context.Context, string) time.Duration { - return time.Second + return time.Hour } func (f fakeLimits) MaxQueryParallelism(context.Context, string) int { From 9d84a6868b4327a243c6f26e0c5c9954402ada23 Mon Sep 17 00:00:00 2001 From: sherinabr Date: Tue, 4 Feb 2025 14:10:12 +0000 Subject: [PATCH 13/33] fix: export ExcludedMetadataLabels so it can be extended in GEL (#16083) --- pkg/util/entry_size.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/util/entry_size.go b/pkg/util/entry_size.go index 4f2c8f0bf82dc..91f0b300010a6 100644 --- a/pkg/util/entry_size.go +++ b/pkg/util/entry_size.go @@ -20,12 +20,12 @@ func EntryTotalSize(entry *push.Entry) int { return len(entry.Line) + StructuredMetadataSize(entry.StructuredMetadata) } -var excludedStructuredMetadataLabels = []string{constants.LevelLabel} +var ExcludedStructuredMetadataLabels = []string{constants.LevelLabel} func StructuredMetadataSize(metas push.LabelsAdapter) int { size := 0 for _, meta := range metas { - if slices.Contains(excludedStructuredMetadataLabels, meta.Name) { + if slices.Contains(ExcludedStructuredMetadataLabels, meta.Name) { continue } size += len(meta.Name) + len(meta.Value) From 59ade9b10559cdff5a4f42f14a93a7a393737e1b Mon Sep 17 00:00:00 2001 From: Mateusz Drab Date: Tue, 4 Feb 2025 06:43:50 -0800 Subject: [PATCH 14/33] docs: add match_first_network to docker_sd_configs docs (#16081) Signed-off-by: Mateusz Drab Co-authored-by: J Stickler --- docs/sources/send-data/promtail/configuration.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/sources/send-data/promtail/configuration.md b/docs/sources/send-data/promtail/configuration.md index 2d768f22f3667..70696f32bc002 100644 --- a/docs/sources/send-data/promtail/configuration.md +++ b/docs/sources/send-data/promtail/configuration.md @@ -2034,6 +2034,11 @@ tls_config: # The host to use if the container is in host networking mode. 
[ host_networking_host: | default = "localhost" ] +# Sort all non-nil networks in ascending order based on network name and +# get the first network if the container has multiple networks defined, +# thus avoiding collecting duplicate targets. +[ match_first_network: | default = true ] + # Optional filters to limit the discovery process to a subset of available # resources. # The available filters are listed in the Docker documentation: From 9ddc756c1d18fff4c9f91b560a688e15292f9be4 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 4 Feb 2025 14:58:26 +0000 Subject: [PATCH 15/33] fix(deps): update module golang.org/x/oauth2 to v0.26.0 (main) (#16085) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 +-- vendor/golang.org/x/oauth2/google/default.go | 12 +++++++ .../google/externalaccount/basecredentials.go | 32 +++++++++++++++++++ vendor/modules.txt | 2 +- 5 files changed, 48 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index f6b7dedfb55b7..b99581f1ed29d 100644 --- a/go.mod +++ b/go.mod @@ -145,7 +145,7 @@ require ( github.com/willf/bloom v2.0.3+incompatible go.opentelemetry.io/collector/pdata v1.25.0 go4.org/netipx v0.0.0-20230125063823-8449b0a6169f - golang.org/x/oauth2 v0.25.0 + golang.org/x/oauth2 v0.26.0 golang.org/x/text v0.21.0 google.golang.org/protobuf v1.36.4 gotest.tools v2.2.0+incompatible diff --git a/go.sum b/go.sum index ce878f9db4506..d2cb7fc558638 100644 --- a/go.sum +++ b/go.sum @@ -1366,8 +1366,8 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= -golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.26.0 h1:afQXWNNaeC4nvZ0Ed9XvCCzXM6UHJG7iCg0W4fPqSBE= +golang.org/x/oauth2 v0.26.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go index df958359a8706..0260935bab745 100644 --- a/vendor/golang.org/x/oauth2/google/default.go +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -251,6 +251,12 @@ func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials // a Google Developers service account key file, a gcloud user credentials file (a.k.a. refresh // token JSON), or the JSON configuration file for workload identity federation in non-Google cloud // platforms (see https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation). +// +// Important: If you accept a credential configuration (credential JSON/File/Stream) from an +// external source for authentication to Google Cloud Platform, you must validate it before +// providing it to any Google API or library. 
Providing an unvalidated credential configuration to +// Google APIs can compromise the security of your systems and data. For more information, refer to +// [Validate credential configurations from external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials). func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params CredentialsParams) (*Credentials, error) { // Make defensive copy of the slices in params. params = params.deepCopy() @@ -294,6 +300,12 @@ func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params } // CredentialsFromJSON invokes CredentialsFromJSONWithParams with the specified scopes. +// +// Important: If you accept a credential configuration (credential JSON/File/Stream) from an +// external source for authentication to Google Cloud Platform, you must validate it before +// providing it to any Google API or library. Providing an unvalidated credential configuration to +// Google APIs can compromise the security of your systems and data. For more information, refer to +// [Validate credential configurations from external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials). func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*Credentials, error) { var params CredentialsParams params.Scopes = scopes diff --git a/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go b/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go index ee34924e301b1..fc106347d85c5 100644 --- a/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go +++ b/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go @@ -278,20 +278,52 @@ type Format struct { type CredentialSource struct { // File is the location for file sourced credentials. // One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question. + // + // Important: If you accept a credential configuration (credential + // JSON/File/Stream) from an external source for authentication to Google + // Cloud Platform, you must validate it before providing it to any Google + // API or library. Providing an unvalidated credential configuration to + // Google APIs can compromise the security of your systems and data. For + // more information, refer to [Validate credential configurations from + // external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials). File string `json:"file"` // Url is the URL to call for URL sourced credentials. // One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question. + // + // Important: If you accept a credential configuration (credential + // JSON/File/Stream) from an external source for authentication to Google + // Cloud Platform, you must validate it before providing it to any Google + // API or library. Providing an unvalidated credential configuration to + // Google APIs can compromise the security of your systems and data. For + // more information, refer to [Validate credential configurations from + // external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials). URL string `json:"url"` // Headers are the headers to attach to the request for URL sourced credentials. Headers map[string]string `json:"headers"` // Executable is the configuration object for executable sourced credentials. 
// One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question. + // + // Important: If you accept a credential configuration (credential + // JSON/File/Stream) from an external source for authentication to Google + // Cloud Platform, you must validate it before providing it to any Google + // API or library. Providing an unvalidated credential configuration to + // Google APIs can compromise the security of your systems and data. For + // more information, refer to [Validate credential configurations from + // external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials). Executable *ExecutableConfig `json:"executable"` // EnvironmentID is the EnvironmentID used for AWS sourced credentials. This should start with "AWS". // One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question. + // + // Important: If you accept a credential configuration (credential + // JSON/File/Stream) from an external source for authentication to Google + // Cloud Platform, you must validate it before providing it to any Google + // API or library. Providing an unvalidated credential configuration to + // Google APIs can compromise the security of your systems and data. For + // more information, refer to [Validate credential configurations from + // external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials). EnvironmentID string `json:"environment_id"` // RegionURL is the metadata URL to retrieve the region from for EC2 AWS credentials. RegionURL string `json:"region_url"` diff --git a/vendor/modules.txt b/vendor/modules.txt index 4f92f6e5f9ae5..cabbd0a33109d 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1933,7 +1933,7 @@ golang.org/x/net/netutil golang.org/x/net/proxy golang.org/x/net/publicsuffix golang.org/x/net/trace -# golang.org/x/oauth2 v0.25.0 +# golang.org/x/oauth2 v0.26.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/authhandler From 536dbc6e185c414acb90b0efec524683e21a098c Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 4 Feb 2025 15:29:56 +0000 Subject: [PATCH 16/33] fix(deps): update module golang.org/x/sync to v0.11.0 (main) (#16086) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- vendor/golang.org/x/sync/errgroup/errgroup.go | 1 + vendor/modules.txt | 2 +- 4 files changed, 5 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index b99581f1ed29d..c2f1561b05771 100644 --- a/go.mod +++ b/go.mod @@ -100,7 +100,7 @@ require ( go.uber.org/goleak v1.3.0 golang.org/x/crypto v0.32.0 golang.org/x/net v0.34.0 - golang.org/x/sync v0.10.0 + golang.org/x/sync v0.11.0 golang.org/x/sys v0.29.0 golang.org/x/time v0.9.0 google.golang.org/api v0.219.0 diff --git a/go.sum b/go.sum index d2cb7fc558638..7fd2d908908b6 100644 --- a/go.sum +++ b/go.sum @@ -1381,8 +1381,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= -golang.org/x/sync 
v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= +golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index 948a3ee63d4ff..b8322598ae3ea 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -118,6 +118,7 @@ func (g *Group) TryGo(f func() error) bool { // SetLimit limits the number of active goroutines in this group to at most n. // A negative value indicates no limit. +// A limit of zero will prevent any new goroutines from being added. // // Any subsequent call to the Go method will block until it can add an active // goroutine without exceeding the configured limit. diff --git a/vendor/modules.txt b/vendor/modules.txt index cabbd0a33109d..a135d97391855 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1946,7 +1946,7 @@ golang.org/x/oauth2/google/internal/stsexchange golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sync v0.10.0 +# golang.org/x/sync v0.11.0 ## explicit; go 1.18 golang.org/x/sync/errgroup golang.org/x/sync/semaphore From 7f32de6cf256e21ced948b860b835cb37f89a6b9 Mon Sep 17 00:00:00 2001 From: benclive Date: Tue, 4 Feb 2025 15:36:48 +0000 Subject: [PATCH 17/33] chore(dataobj): Refactor processor & builder to separate concerns (#16055) --- docs/sources/shared/configuration.md | 10 +- go.mod | 2 +- go.sum | 4 +- pkg/dataobj/builder.go | 134 ++++++------------ pkg/dataobj/builder_test.go | 65 ++++----- pkg/dataobj/consumer/config.go | 7 + pkg/dataobj/consumer/metrics.go | 15 +- pkg/dataobj/consumer/partition_processor.go | 98 ++++++++----- pkg/dataobj/consumer/service.go | 2 +- pkg/dataobj/internal/encoding/encoder.go | 8 ++ .../internal/sections/streams/streams.go | 16 +-- pkg/dataobj/metastore/metastore.go | 54 ++++--- pkg/dataobj/metastore/metastore_test.go | 25 ++-- pkg/dataobj/metrics.go | 44 ++---- pkg/dataobj/uploader/metrics.go | 63 ++++++++ pkg/dataobj/uploader/uploader.go | 93 ++++++++++++ .../providers/filesystem/filesystem.go | 10 +- .../thanos-io/objstore/providers/gcs/gcs.go | 4 +- vendor/modules.txt | 4 +- 19 files changed, 383 insertions(+), 275 deletions(-) create mode 100644 pkg/dataobj/uploader/metrics.go create mode 100644 pkg/dataobj/uploader/uploader.go diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md index 035c550db7eeb..b53d5266caba6 100644 --- a/docs/sources/shared/configuration.md +++ b/docs/sources/shared/configuration.md @@ -788,10 +788,6 @@ kafka_config: dataobj_consumer: builderconfig: - # The size of the SHA prefix to use for the data object builder. - # CLI flag: -dataobj-consumer.sha-prefix-size - [sha_prefix_size: | default = 2] - # The size of the target page to use for the data object builder. 
# CLI flag: -dataobj-consumer.target-page-size [target_page_size: | default = 2MiB] @@ -808,6 +804,12 @@ dataobj_consumer: # CLI flag: -dataobj-consumer.buffer-size [buffer_size: | default = 16MiB] + uploader: + # The size of the SHA prefix to use for generating object storage keys for + # data objects. + # CLI flag: -dataobj-consumer.sha-prefix-size + [shaprefixsize: | default = 2] + # The prefix to use for the storage bucket. # CLI flag: -dataobj-consumer.storage-bucket-prefix [storage_bucket_prefix: | default = "dataobj/"] diff --git a/go.mod b/go.mod index c2f1561b05771..2ae12f5d18c2e 100644 --- a/go.mod +++ b/go.mod @@ -409,4 +409,4 @@ replace github.com/grafana/loki/pkg/push => ./pkg/push // leodido fork his project to continue support replace github.com/influxdata/go-syslog/v3 => github.com/leodido/go-syslog/v4 v4.2.0 -replace github.com/thanos-io/objstore => github.com/grafana/objstore v0.0.0-20250128154815-d7e99f81f866 +replace github.com/thanos-io/objstore => github.com/grafana/objstore v0.0.0-20250203161329-90e33e9afde6 diff --git a/go.sum b/go.sum index 7fd2d908908b6..0e1a136281545 100644 --- a/go.sum +++ b/go.sum @@ -628,8 +628,8 @@ github.com/grafana/jsonparser v0.0.0-20241004153430-023329977675 h1:U94jQ2TQr1m3 github.com/grafana/jsonparser v0.0.0-20241004153430-023329977675/go.mod h1:796sq+UcONnSlzA3RtlBZ+b/hrerkZXiEmO8oMjyRwY= github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe h1:yIXAAbLswn7VNWBIvM71O2QsgfgW9fRXZNR0DXe6pDU= github.com/grafana/memberlist v0.3.1-0.20220714140823-09ffed8adbbe/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/grafana/objstore v0.0.0-20250128154815-d7e99f81f866 h1:/y3qC0I9kttHjLPxp4bGf+4jcJw60C6hrokTPckHYT8= -github.com/grafana/objstore v0.0.0-20250128154815-d7e99f81f866/go.mod h1:Quz9HUDjGidU0RQpoytzK4KqJ7kwzP+DMAm4K57/usM= +github.com/grafana/objstore v0.0.0-20250203161329-90e33e9afde6 h1:SlGPi1Sg15c/OzhGMAd7/EOnYJ03ZX6Wuql8lQ2pRU4= +github.com/grafana/objstore v0.0.0-20250203161329-90e33e9afde6/go.mod h1:Quz9HUDjGidU0RQpoytzK4KqJ7kwzP+DMAm4K57/usM= github.com/grafana/pyroscope-go/godeltaprof v0.1.8 h1:iwOtYXeeVSAeYefJNaxDytgjKtUuKQbJqgAIjlnicKg= github.com/grafana/pyroscope-go/godeltaprof v0.1.8/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU= github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= diff --git a/pkg/dataobj/builder.go b/pkg/dataobj/builder.go index 87f668bc4fd2c..fefe62cb1c1d8 100644 --- a/pkg/dataobj/builder.go +++ b/pkg/dataobj/builder.go @@ -3,8 +3,6 @@ package dataobj import ( "bytes" "context" - "crypto/sha256" - "encoding/hex" "errors" "flag" "fmt" @@ -14,7 +12,6 @@ import ( lru "github.com/hashicorp/golang-lru/v2" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/model/labels" - "github.com/thanos-io/objstore" "github.com/grafana/loki/v3/pkg/dataobj/internal/encoding" "github.com/grafana/loki/v3/pkg/dataobj/internal/sections/logs" @@ -23,16 +20,15 @@ import ( "github.com/grafana/loki/v3/pkg/logql/syntax" ) -// ErrBufferFull is returned by [Builder.Append] when the buffer is full and +// ErrBuilderFull is returned by [Builder.Append] when the buffer is full and // needs to flush; call [Builder.Flush] to flush it. -var ErrBufferFull = errors.New("buffer full") +var ( + ErrBuilderFull = errors.New("builder full") + ErrBuilderEmpty = errors.New("builder empty") +) // BuilderConfig configures a data object [Builder]. 
type BuilderConfig struct { - // SHAPrefixSize sets the number of bytes of the SHA filename to use as a - // folder path. - SHAPrefixSize int `yaml:"sha_prefix_size"` - // TargetPageSize configures a target size for encoded pages within the data // object. TargetPageSize accounts for encoding, but not for compression. TargetPageSize flagext.Bytes `yaml:"target_page_size"` @@ -65,7 +61,6 @@ func (cfg *BuilderConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet _ = cfg.BufferSize.Set("16MB") // Page Size * 8 _ = cfg.TargetSectionSize.Set("128MB") // Target Object Size / 8 - f.IntVar(&cfg.SHAPrefixSize, prefix+"sha-prefix-size", 2, "The size of the SHA prefix to use for the data object builder.") f.Var(&cfg.TargetPageSize, prefix+"target-page-size", "The size of the target page to use for the data object builder.") f.Var(&cfg.TargetObjectSize, prefix+"target-object-size", "The size of the target object to use for the data object builder.") f.Var(&cfg.TargetSectionSize, prefix+"target-section-size", "Configures a maximum size for sections, for sections that support it.") @@ -76,10 +71,6 @@ func (cfg *BuilderConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet func (cfg *BuilderConfig) Validate() error { var errs []error - if cfg.SHAPrefixSize <= 0 { - errs = append(errs, errors.New("SHAPrefixSize must be greater than 0")) - } - if cfg.TargetPageSize <= 0 { errs = append(errs, errors.New("TargetPageSize must be greater than 0")) } else if cfg.TargetPageSize >= cfg.TargetObjectSize { @@ -108,47 +99,41 @@ func (cfg *BuilderConfig) Validate() error { // Methods on Builder are not goroutine-safe; callers are responsible for // synchronizing calls. type Builder struct { - cfg BuilderConfig - metrics *metrics - bucket objstore.Bucket - tenantID string + cfg BuilderConfig + metrics *metrics labelCache *lru.Cache[string, labels.Labels] currentSizeEstimate int - state builderState streams *streams.Streams logs *logs.Logs - flushBuffer *bytes.Buffer - encoder *encoding.Encoder + state builderState + + encoder *encoding.Encoder } type builderState int -type FlushResult struct { - Path string - MinTimestamp time.Time - MaxTimestamp time.Time -} - const ( - // builderStateReady indicates the builder is empty and ready to accept new data. + // builderStateEmpty indicates the builder is empty and ready to accept new data. builderStateEmpty builderState = iota // builderStateDirty indicates the builder has been modified since the last flush. builderStateDirty - - // builderStateFlushing indicates the builder has data to flush. - builderStateFlush ) +type FlushStats struct { + MinTimestamp time.Time + MaxTimestamp time.Time +} + // NewBuilder creates a new Builder which stores data objects for the specified // tenant in a bucket. // // NewBuilder returns an error if BuilderConfig is invalid. 
-func NewBuilder(cfg BuilderConfig, bucket objstore.Bucket, tenantID string) (*Builder, error) { +func NewBuilder(cfg BuilderConfig) (*Builder, error) { if err := cfg.Validate(); err != nil { return nil, err } @@ -160,17 +145,12 @@ func NewBuilder(cfg BuilderConfig, bucket objstore.Bucket, tenantID string) (*Bu var ( metrics = newMetrics() - - flushBuffer = bytes.NewBuffer(make([]byte, 0, int(cfg.TargetObjectSize))) - encoder = encoding.NewEncoder(flushBuffer) ) metrics.ObserveConfig(cfg) return &Builder{ - cfg: cfg, - metrics: metrics, - bucket: bucket, - tenantID: tenantID, + cfg: cfg, + metrics: metrics, labelCache: labelCache, @@ -181,23 +161,17 @@ func NewBuilder(cfg BuilderConfig, bucket objstore.Bucket, tenantID string) (*Bu SectionSize: int(cfg.TargetSectionSize), }), - flushBuffer: flushBuffer, - encoder: encoder, + encoder: encoding.NewEncoder(nil), }, nil } // Append buffers a stream to be written to a data object. Append returns an -// error if the stream labels cannot be parsed or [ErrBufferFull] if the +// error if the stream labels cannot be parsed or [ErrBuilderFull] if the // builder is full. // // Once a Builder is full, call [Builder.Flush] to flush the buffered data, // then call Append again with the same entry. func (b *Builder) Append(stream logproto.Stream) error { - // Don't allow appending to a builder that has data to be flushed. - if b.state == builderStateFlush { - return ErrBufferFull - } - ls, err := b.parseLabels(stream.Labels) if err != nil { return err @@ -210,7 +184,7 @@ func (b *Builder) Append(stream logproto.Stream) error { // b.currentSizeEstimate will always be updated to reflect the size following // the previous append. if b.state != builderStateEmpty && b.currentSizeEstimate+labelsEstimate(ls)+streamSizeEstimate(stream) > int(b.cfg.TargetObjectSize) { - return ErrBufferFull + return ErrBuilderFull } timer := prometheus.NewTimer(b.metrics.appendTime) @@ -286,60 +260,37 @@ func streamSizeEstimate(stream logproto.Stream) int { return size } -// Flush flushes all buffered data to object storage. Calling Flush can result +// Flush flushes all buffered data to the buffer provided. Calling Flush can result // in a no-op if there is no buffered data to flush. // -// If Flush builds an object but fails to upload it to object storage, the -// built object is cached and can be retried. [Builder.Reset] can be called to -// discard any pending data and allow new data to be appended. -func (b *Builder) Flush(ctx context.Context) (FlushResult, error) { - buf, err := b.FlushToBuffer() - if err != nil { - return FlushResult{}, fmt.Errorf("flushing buffer: %w", err) +// [Builder.Reset] is called after a successful Flush to discard any pending data and allow new data to be appended. 
+func (b *Builder) Flush(output *bytes.Buffer) (FlushStats, error) { + if b.state == builderStateEmpty { + return FlushStats{}, ErrBuilderEmpty } - timer := prometheus.NewTimer(b.metrics.flushTime) - defer timer.ObserveDuration() - - sum := sha256.Sum224(b.flushBuffer.Bytes()) - sumStr := hex.EncodeToString(sum[:]) - - objectPath := fmt.Sprintf("tenant-%s/objects/%s/%s", b.tenantID, sumStr[:b.cfg.SHAPrefixSize], sumStr[b.cfg.SHAPrefixSize:]) - if err := b.bucket.Upload(ctx, objectPath, bytes.NewReader(buf.Bytes())); err != nil { - return FlushResult{}, fmt.Errorf("uploading object: %w", err) + err := b.buildObject(output) + if err != nil { + b.metrics.flushFailures.Inc() + return FlushStats{}, fmt.Errorf("building object: %w", err) } - minTime, maxTime := b.streams.GetBounds() + minTime, maxTime := b.streams.TimeRange() b.Reset() - return FlushResult{ - Path: objectPath, + return FlushStats{ MinTimestamp: minTime, MaxTimestamp: maxTime, }, nil } -func (b *Builder) FlushToBuffer() (*bytes.Buffer, error) { - switch b.state { - case builderStateEmpty: - return nil, nil // Nothing to flush - case builderStateDirty: - if err := b.buildObject(); err != nil { - return nil, fmt.Errorf("building object: %w", err) - } - b.state = builderStateFlush - } - - return b.flushBuffer, nil -} - -func (b *Builder) buildObject() error { +func (b *Builder) buildObject(output *bytes.Buffer) error { timer := prometheus.NewTimer(b.metrics.buildTime) defer timer.ObserveDuration() - // We reset after a successful flush, but we also reset the buffer before - // building for safety. - b.flushBuffer.Reset() + initialBufferSize := output.Len() + + b.encoder.Reset(output) if err := b.streams.EncodeTo(b.encoder); err != nil { return fmt.Errorf("encoding streams: %w", err) @@ -349,12 +300,12 @@ func (b *Builder) buildObject() error { return fmt.Errorf("encoding object: %w", err) } - b.metrics.builtSize.Observe(float64(b.flushBuffer.Len())) + b.metrics.builtSize.Observe(float64(output.Len() - initialBufferSize)) // We pass context.Background() below to avoid allowing building an object to // time out; timing out on build would discard anything we built and would // cause data loss. - dec := encoding.ReaderAtDecoder(bytes.NewReader(b.flushBuffer.Bytes()), int64(b.flushBuffer.Len())) + dec := encoding.ReaderAtDecoder(bytes.NewReader(output.Bytes()[initialBufferSize:]), int64(output.Len()-initialBufferSize)) return b.metrics.encoding.Observe(context.Background(), dec) } @@ -363,9 +314,9 @@ func (b *Builder) Reset() { b.logs.Reset() b.streams.Reset() - b.state = builderStateEmpty - b.flushBuffer.Reset() b.metrics.sizeEstimate.Set(0) + b.currentSizeEstimate = 0 + b.state = builderStateEmpty } // RegisterMetrics registers metrics about builder to report to reg. All @@ -374,13 +325,10 @@ func (b *Builder) Reset() { // If multiple Builders for the same tenant are running in the same process, // reg must contain additional labels to differentiate between them. func (b *Builder) RegisterMetrics(reg prometheus.Registerer) error { - reg = prometheus.WrapRegistererWith(prometheus.Labels{"tenant": b.tenantID}, reg) - return b.metrics.Register(reg) } // UnregisterMetrics unregisters metrics about builder from reg. 
func (b *Builder) UnregisterMetrics(reg prometheus.Registerer) { - reg = prometheus.WrapRegistererWith(prometheus.Labels{"tenant": b.tenantID}, reg) b.metrics.Unregister(reg) } diff --git a/pkg/dataobj/builder_test.go b/pkg/dataobj/builder_test.go index ff86e8dcb941c..20e9d0db9b4f7 100644 --- a/pkg/dataobj/builder_test.go +++ b/pkg/dataobj/builder_test.go @@ -1,25 +1,21 @@ package dataobj import ( + "bytes" "context" "errors" - "fmt" "strings" "testing" "time" "github.com/stretchr/testify/require" - "github.com/thanos-io/objstore" "github.com/grafana/loki/pkg/push" - "github.com/grafana/loki/v3/pkg/dataobj/internal/result" "github.com/grafana/loki/v3/pkg/logproto" ) var testBuilderConfig = BuilderConfig{ - SHAPrefixSize: 2, - TargetPageSize: 2048, TargetObjectSize: 4096, TargetSectionSize: 4096, @@ -28,7 +24,8 @@ var testBuilderConfig = BuilderConfig{ } func TestBuilder(t *testing.T) { - bucket := objstore.NewInMemBucket() + buf := bytes.NewBuffer(nil) + dirtyBuf := bytes.NewBuffer([]byte("dirty")) streams := []logproto.Stream{ { @@ -75,22 +72,40 @@ func TestBuilder(t *testing.T) { } t.Run("Build", func(t *testing.T) { - builder, err := NewBuilder(testBuilderConfig, bucket, "fake") + builder, err := NewBuilder(testBuilderConfig) require.NoError(t, err) for _, entry := range streams { require.NoError(t, builder.Append(entry)) } - _, err = builder.Flush(context.Background()) + _, err = builder.Flush(buf) require.NoError(t, err) }) t.Run("Read", func(t *testing.T) { - objects, err := result.Collect(listObjects(context.Background(), bucket, "fake")) + obj := FromReaderAt(bytes.NewReader(buf.Bytes()), int64(buf.Len())) + md, err := obj.Metadata(context.Background()) + require.NoError(t, err) + require.Equal(t, 1, md.StreamsSections) + require.Equal(t, 1, md.LogsSections) + }) + + t.Run("BuildWithDirtyBuffer", func(t *testing.T) { + builder, err := NewBuilder(testBuilderConfig) require.NoError(t, err) - require.Len(t, objects, 1) - obj := FromBucket(bucket, objects[0]) + for _, entry := range streams { + require.NoError(t, builder.Append(entry)) + } + + _, err = builder.Flush(dirtyBuf) + require.NoError(t, err) + + require.Equal(t, buf.Len(), dirtyBuf.Len()-5) + }) + + t.Run("ReadFromDirtyBuffer", func(t *testing.T) { + obj := FromReaderAt(bytes.NewReader(dirtyBuf.Bytes()[5:]), int64(dirtyBuf.Len()-5)) md, err := obj.Metadata(context.Background()) require.NoError(t, err) require.Equal(t, 1, md.StreamsSections) @@ -104,9 +119,7 @@ func TestBuilder_Append(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Minute) defer cancel() - bucket := objstore.NewInMemBucket() - - builder, err := NewBuilder(testBuilderConfig, bucket, "fake") + builder, err := NewBuilder(testBuilderConfig) require.NoError(t, err) for { @@ -119,31 +132,9 @@ func TestBuilder_Append(t *testing.T) { Line: strings.Repeat("a", 1024), }}, }) - if errors.Is(err, ErrBufferFull) { + if errors.Is(err, ErrBuilderFull) { break } require.NoError(t, err) } } - -func listObjects(ctx context.Context, bucket objstore.Bucket, tenant string) result.Seq[string] { - tenantPath := fmt.Sprintf("tenant-%s/objects/", tenant) - - return result.Iter(func(yield func(string) bool) error { - errIterationStopped := errors.New("iteration stopped") - - err := bucket.Iter(ctx, tenantPath, func(name string) error { - if !yield(name) { - return errIterationStopped - } - return nil - }, objstore.WithRecursiveIter()) - - switch { - case errors.Is(err, errIterationStopped): - return nil - default: - return err - } - }) -} diff --git 
a/pkg/dataobj/consumer/config.go b/pkg/dataobj/consumer/config.go index c62ae612193cb..04b1ba58bc2ba 100644 --- a/pkg/dataobj/consumer/config.go +++ b/pkg/dataobj/consumer/config.go @@ -4,15 +4,21 @@ import ( "flag" "github.com/grafana/loki/v3/pkg/dataobj" + "github.com/grafana/loki/v3/pkg/dataobj/uploader" ) type Config struct { dataobj.BuilderConfig + UploaderConfig uploader.Config `yaml:"uploader"` // StorageBucketPrefix is the prefix to use for the storage bucket. StorageBucketPrefix string `yaml:"storage_bucket_prefix"` } func (cfg *Config) Validate() error { + if err := cfg.UploaderConfig.Validate(); err != nil { + return err + } + return cfg.BuilderConfig.Validate() } @@ -22,5 +28,6 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { cfg.BuilderConfig.RegisterFlagsWithPrefix(prefix, f) + cfg.UploaderConfig.RegisterFlagsWithPrefix(prefix, f) f.StringVar(&cfg.StorageBucketPrefix, prefix+"storage-bucket-prefix", "dataobj/", "The prefix to use for the storage bucket.") } diff --git a/pkg/dataobj/consumer/metrics.go b/pkg/dataobj/consumer/metrics.go index 4525cb512de3b..f2ef8f35f9555 100644 --- a/pkg/dataobj/consumer/metrics.go +++ b/pkg/dataobj/consumer/metrics.go @@ -13,7 +13,6 @@ type partitionOffsetMetrics struct { lastOffset atomic.Int64 // Error counters - flushFailures prometheus.Counter commitFailures prometheus.Counter appendFailures prometheus.Counter @@ -23,10 +22,6 @@ type partitionOffsetMetrics struct { func newPartitionOffsetMetrics() *partitionOffsetMetrics { p := &partitionOffsetMetrics{ - flushFailures: prometheus.NewCounter(prometheus.CounterOpts{ - Name: "loki_dataobj_consumer_flush_failures_total", - Help: "Total number of flush failures", - }), commitFailures: prometheus.NewCounter(prometheus.CounterOpts{ Name: "loki_dataobj_consumer_commit_failures_total", Help: "Total number of commit failures", @@ -62,10 +57,9 @@ func (p *partitionOffsetMetrics) getCurrentOffset() float64 { func (p *partitionOffsetMetrics) register(reg prometheus.Registerer) error { collectors := []prometheus.Collector{ - p.currentOffset, - p.flushFailures, p.commitFailures, p.appendFailures, + p.currentOffset, p.processingDelay, } @@ -81,10 +75,9 @@ func (p *partitionOffsetMetrics) register(reg prometheus.Registerer) error { func (p *partitionOffsetMetrics) unregister(reg prometheus.Registerer) { collectors := []prometheus.Collector{ - p.currentOffset, - p.flushFailures, p.commitFailures, p.appendFailures, + p.currentOffset, p.processingDelay, } @@ -97,10 +90,6 @@ func (p *partitionOffsetMetrics) updateOffset(offset int64) { p.lastOffset.Store(offset) } -func (p *partitionOffsetMetrics) incFlushFailures() { - p.flushFailures.Inc() -} - func (p *partitionOffsetMetrics) incCommitFailures() { p.commitFailures.Inc() } diff --git a/pkg/dataobj/consumer/partition_processor.go b/pkg/dataobj/consumer/partition_processor.go index 0033e04c640f5..37e63a87fd7db 100644 --- a/pkg/dataobj/consumer/partition_processor.go +++ b/pkg/dataobj/consumer/partition_processor.go @@ -16,6 +16,7 @@ import ( "github.com/grafana/loki/v3/pkg/dataobj" "github.com/grafana/loki/v3/pkg/dataobj/metastore" + "github.com/grafana/loki/v3/pkg/dataobj/uploader" "github.com/grafana/loki/v3/pkg/kafka" ) @@ -26,15 +27,18 @@ type partitionProcessor struct { partition int32 tenantID []byte // Processing pipeline - records chan *kgo.Record - builder *dataobj.Builder - decoder *kafka.Decoder + records chan *kgo.Record + builder *dataobj.Builder + decoder 
*kafka.Decoder + uploader *uploader.Uploader + metastoreManager *metastore.Manager // Builder initialization - builderOnce sync.Once - builderCfg dataobj.BuilderConfig - bucket objstore.Bucket - metastoreManager *metastore.Manager + builderOnce sync.Once + builderCfg dataobj.BuilderConfig + bucket objstore.Bucket + flushBuffer *bytes.Buffer + // Metrics metrics *partitionOffsetMetrics @@ -46,7 +50,7 @@ type partitionProcessor struct { logger log.Logger } -func newPartitionProcessor(ctx context.Context, client *kgo.Client, builderCfg dataobj.BuilderConfig, bucket objstore.Bucket, tenantID string, virtualShard int32, topic string, partition int32, logger log.Logger, reg prometheus.Registerer) *partitionProcessor { +func newPartitionProcessor(ctx context.Context, client *kgo.Client, builderCfg dataobj.BuilderConfig, uploaderCfg uploader.Config, bucket objstore.Bucket, tenantID string, virtualShard int32, topic string, partition int32, logger log.Logger, reg prometheus.Registerer) *partitionProcessor { ctx, cancel := context.WithCancel(ctx) decoder, err := kafka.NewDecoder() if err != nil { @@ -55,6 +59,7 @@ func newPartitionProcessor(ctx context.Context, client *kgo.Client, builderCfg d reg = prometheus.WrapRegistererWith(prometheus.Labels{ "shard": strconv.Itoa(int(virtualShard)), "partition": strconv.Itoa(int(partition)), + "tenant": tenantID, "topic": topic, }, reg) @@ -63,11 +68,14 @@ func newPartitionProcessor(ctx context.Context, client *kgo.Client, builderCfg d level.Error(logger).Log("msg", "failed to register partition metrics", "err", err) } - metastoreManager, err := metastore.NewMetastoreManager(bucket, tenantID, logger, reg) - if err != nil { - level.Error(logger).Log("msg", "failed to create metastore manager", "err", err) - cancel() - return nil + uploader := uploader.New(uploaderCfg, bucket, tenantID) + if err := uploader.RegisterMetrics(reg); err != nil { + level.Error(logger).Log("msg", "failed to register uploader metrics", "err", err) + } + + metastoreManager := metastore.NewManager(bucket, tenantID, logger) + if err := metastoreManager.RegisterMetrics(reg); err != nil { + level.Error(logger).Log("msg", "failed to register metastore manager metrics", "err", err) } return &partitionProcessor{ @@ -84,6 +92,7 @@ func newPartitionProcessor(ctx context.Context, client *kgo.Client, builderCfg d bucket: bucket, tenantID: []byte(tenantID), metrics: metrics, + uploader: uploader, metastoreManager: metastoreManager, } } @@ -117,6 +126,7 @@ func (p *partitionProcessor) stop() { p.builder.UnregisterMetrics(p.reg) } p.metrics.unregister(p.reg) + p.uploader.UnregisterMetrics(p.reg) } // Drops records from the channel if the processor is stopped. 
@@ -137,7 +147,8 @@ func (p *partitionProcessor) Append(records []*kgo.Record) bool { func (p *partitionProcessor) initBuilder() error { var initErr error p.builderOnce.Do(func() { - builder, err := dataobj.NewBuilder(p.builderCfg, p.bucket, string(p.tenantID)) + // Dataobj builder + builder, err := dataobj.NewBuilder(p.builderCfg) if err != nil { initErr = err return @@ -147,6 +158,7 @@ func (p *partitionProcessor) initBuilder() error { return } p.builder = builder + p.flushBuffer = bytes.NewBuffer(make([]byte, 0, p.builderCfg.TargetObjectSize)) }) return initErr } @@ -176,42 +188,32 @@ func (p *partitionProcessor) processRecord(record *kgo.Record) { } if err := p.builder.Append(stream); err != nil { - if err != dataobj.ErrBufferFull { + if err != dataobj.ErrBuilderFull { level.Error(p.logger).Log("msg", "failed to append stream", "err", err) p.metrics.incAppendFailures() return } - backoff := backoff.New(p.ctx, backoff.Config{ - MinBackoff: 100 * time.Millisecond, - MaxBackoff: 10 * time.Second, - }) - - var flushResult dataobj.FlushResult - for backoff.Ongoing() { - flushResult, err = p.builder.Flush(p.ctx) - if err == nil { - break - } + flushedDataobjStats, err := p.builder.Flush(p.flushBuffer) + if err != nil { level.Error(p.logger).Log("msg", "failed to flush builder", "err", err) - p.metrics.incFlushFailures() - backoff.Wait() + return + } + + objectPath, err := p.uploader.Upload(p.ctx, p.flushBuffer) + if err != nil { + level.Error(p.logger).Log("msg", "failed to upload object", "err", err) + return } - if err := p.metastoreManager.UpdateMetastore(p.ctx, flushResult); err != nil { + if err := p.metastoreManager.UpdateMetastore(p.ctx, objectPath, flushedDataobjStats); err != nil { level.Error(p.logger).Log("msg", "failed to update metastore", "err", err) return } - backoff.Reset() - for backoff.Ongoing() { - err = p.client.CommitRecords(p.ctx, record) - if err == nil { - break - } + if err := p.commitRecords(record); err != nil { level.Error(p.logger).Log("msg", "failed to commit records", "err", err) - p.metrics.incCommitFailures() - backoff.Wait() + return } if err := p.builder.Append(stream); err != nil { @@ -220,3 +222,25 @@ func (p *partitionProcessor) processRecord(record *kgo.Record) { } } } + +func (p *partitionProcessor) commitRecords(record *kgo.Record) error { + backoff := backoff.New(p.ctx, backoff.Config{ + MinBackoff: 100 * time.Millisecond, + MaxBackoff: 10 * time.Second, + MaxRetries: 20, + }) + + var lastErr error + backoff.Reset() + for backoff.Ongoing() { + err := p.client.CommitRecords(p.ctx, record) + if err == nil { + return nil + } + level.Error(p.logger).Log("msg", "failed to commit records", "err", err) + p.metrics.incCommitFailures() + lastErr = err + backoff.Wait() + } + return lastErr +} diff --git a/pkg/dataobj/consumer/service.go b/pkg/dataobj/consumer/service.go index aee69dbd0dfd3..3522d3a9492b1 100644 --- a/pkg/dataobj/consumer/service.go +++ b/pkg/dataobj/consumer/service.go @@ -95,7 +95,7 @@ func (s *Service) handlePartitionsAssigned(ctx context.Context, client *kgo.Clie } for _, partition := range parts { - processor := newPartitionProcessor(ctx, client, s.cfg.BuilderConfig, s.bucket, tenant, virtualShard, topic, partition, s.logger, s.reg) + processor := newPartitionProcessor(ctx, client, s.cfg.BuilderConfig, s.cfg.UploaderConfig, s.bucket, tenant, virtualShard, topic, partition, s.logger, s.reg) s.partitionHandlers[topic][partition] = processor processor.start() } diff --git a/pkg/dataobj/internal/encoding/encoder.go 
b/pkg/dataobj/internal/encoding/encoder.go index a022d1795a28a..3ed57f5b3e2cd 100644 --- a/pkg/dataobj/internal/encoding/encoder.go +++ b/pkg/dataobj/internal/encoding/encoder.go @@ -167,6 +167,14 @@ func (enc *Encoder) Flush() error { return nil } +func (enc *Encoder) Reset(w streamio.Writer) { + enc.data.Reset() + enc.sections = nil + enc.curSection = nil + enc.w = w + enc.startOffset = len(magic) +} + func (enc *Encoder) append(data, metadata []byte) error { if enc.curSection == nil { return errElementNoExist diff --git a/pkg/dataobj/internal/sections/streams/streams.go b/pkg/dataobj/internal/sections/streams/streams.go index 138de989cc3cd..4d208fb1676d5 100644 --- a/pkg/dataobj/internal/sections/streams/streams.go +++ b/pkg/dataobj/internal/sections/streams/streams.go @@ -17,7 +17,6 @@ import ( "github.com/grafana/loki/v3/pkg/dataobj/internal/encoding" "github.com/grafana/loki/v3/pkg/dataobj/internal/metadata/datasetmd" "github.com/grafana/loki/v3/pkg/dataobj/internal/metadata/streamsmd" - "github.com/grafana/loki/v3/pkg/dataobj/internal/result" "github.com/grafana/loki/v3/pkg/dataobj/internal/streamio" "github.com/grafana/loki/v3/pkg/dataobj/internal/util/sliceclear" ) @@ -35,6 +34,7 @@ type Stream struct { Rows int // Number of rows in the stream. } +// Reset zeroes all values in the stream struct so it can be reused. func (s *Stream) Reset() { s.ID = 0 s.Labels = nil @@ -82,18 +82,8 @@ func New(metrics *Metrics, pageSize int) *Streams { } } -func (s *Streams) Iter() result.Seq[Stream] { - return result.Iter(func(yield func(Stream) bool) error { - for _, stream := range s.ordered { - if !yield(*stream) { - return nil - } - } - return nil - }) -} - -func (s *Streams) GetBounds() (time.Time, time.Time) { +// TimeRange returns the minimum and maximum timestamp across all streams. +func (s *Streams) TimeRange() (time.Time, time.Time) { return s.globalMinTimestamp, s.globalMaxTimestamp } diff --git a/pkg/dataobj/metastore/metastore.go b/pkg/dataobj/metastore/metastore.go index 08c0b00364c9a..ffa14672804b8 100644 --- a/pkg/dataobj/metastore/metastore.go +++ b/pkg/dataobj/metastore/metastore.go @@ -25,7 +25,6 @@ const ( var ( // Define our own builder config because metastore objects are significantly smaller. 
metastoreBuilderCfg = dataobj.BuilderConfig{ - SHAPrefixSize: 2, TargetObjectSize: 32 * 1024 * 1024, TargetPageSize: 4 * 1024 * 1024, BufferSize: 32 * 1024 * 1024, // 8x page size @@ -40,15 +39,13 @@ type Manager struct { bucket objstore.Bucket logger log.Logger backoff *backoff.Backoff + buf *bytes.Buffer builderOnce sync.Once } -func NewMetastoreManager(bucket objstore.Bucket, tenantID string, logger log.Logger, reg prometheus.Registerer) (*Manager, error) { +func NewManager(bucket objstore.Bucket, tenantID string, logger log.Logger) *Manager { metrics := newMetastoreMetrics() - if err := metrics.register(reg); err != nil { - return nil, err - } return &Manager{ bucket: bucket, @@ -60,62 +57,73 @@ func NewMetastoreManager(bucket objstore.Bucket, tenantID string, logger log.Log MaxBackoff: 10 * time.Second, }), builderOnce: sync.Once{}, - }, nil + } +} + +func (m *Manager) RegisterMetrics(reg prometheus.Registerer) error { + return m.metrics.register(reg) +} + +func (m *Manager) UnregisterMetrics(reg prometheus.Registerer) { + m.metrics.unregister(reg) } func (m *Manager) initBuilder() error { var initErr error m.builderOnce.Do(func() { - metastoreBuilder, err := dataobj.NewBuilder(metastoreBuilderCfg, m.bucket, m.tenantID) + metastoreBuilder, err := dataobj.NewBuilder(metastoreBuilderCfg) if err != nil { initErr = err return } + m.buf = bytes.NewBuffer(make([]byte, 0, metastoreBuilderCfg.TargetObjectSize)) m.metastoreBuilder = metastoreBuilder }) return initErr } -func (m *Manager) UpdateMetastore(ctx context.Context, flushResult dataobj.FlushResult) error { +// UpdateMetastore adds provided dataobj path to the metastore. Flush stats are used to determine the stored metadata about this dataobj. +func (m *Manager) UpdateMetastore(ctx context.Context, dataobjPath string, flushStats dataobj.FlushStats) error { var err error - start := time.Now() - defer m.metrics.observeMetastoreProcessing(start) + processingTime := prometheus.NewTimer(m.metrics.metastoreProcessingTime) + defer processingTime.ObserveDuration() // Initialize builder if this is the first call for this partition if err := m.initBuilder(); err != nil { return err } - minTimestamp, maxTimestamp := flushResult.MinTimestamp, flushResult.MaxTimestamp + minTimestamp, maxTimestamp := flushStats.MinTimestamp, flushStats.MaxTimestamp // Work our way through the metastore objects window by window, updating & creating them as needed. // Each one handles its own retries in order to keep making progress in the event of a failure. 
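	// Illustration only, assuming a 12h metastoreWindowSize (the real value is a package constant):
	// a flush covering 14:05-15:30 UTC falls entirely in the window starting at 12:00 UTC, while one
	// spanning 11:55-12:05 UTC updates both the 00:00 and 12:00 windows.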
minMetastoreWindow := minTimestamp.Truncate(metastoreWindowSize) maxMetastoreWindow := maxTimestamp.Truncate(metastoreWindowSize) - for metastoreWindow := minMetastoreWindow; metastoreWindow.Compare(maxMetastoreWindow) <= 0; metastoreWindow = metastoreWindow.Add(metastoreWindowSize) { + for metastoreWindow := minMetastoreWindow; !metastoreWindow.After(maxMetastoreWindow); metastoreWindow = metastoreWindow.Add(metastoreWindowSize) { metastorePath := fmt.Sprintf("tenant-%s/metastore/%s.store", m.tenantID, metastoreWindow.Format(time.RFC3339)) m.backoff.Reset() for m.backoff.Ongoing() { err = m.bucket.GetAndReplace(ctx, metastorePath, func(existing io.Reader) (io.Reader, error) { - buf, err := io.ReadAll(existing) + m.buf.Reset() + _, err := io.Copy(m.buf, existing) if err != nil { return nil, err } m.metastoreBuilder.Reset() - if len(buf) > 0 { - replayStart := time.Now() - object := dataobj.FromReaderAt(bytes.NewReader(buf), int64(len(buf))) + if m.buf.Len() > 0 { + replayDuration := prometheus.NewTimer(m.metrics.metastoreReplayTime) + object := dataobj.FromReaderAt(bytes.NewReader(m.buf.Bytes()), int64(m.buf.Len())) if err := m.readFromExisting(ctx, object); err != nil { return nil, err } - m.metrics.observeMetastoreReplay(replayStart) + replayDuration.ObserveDuration() } - encodingStart := time.Now() + encodingDuration := prometheus.NewTimer(m.metrics.metastoreEncodingTime) - ls := fmt.Sprintf("{__start__=\"%d\", __end__=\"%d\", __path__=\"%s\"}", minTimestamp.UnixNano(), maxTimestamp.UnixNano(), flushResult.Path) + ls := fmt.Sprintf("{__start__=\"%d\", __end__=\"%d\", __path__=\"%s\"}", minTimestamp.UnixNano(), maxTimestamp.UnixNano(), dataobjPath) err = m.metastoreBuilder.Append(logproto.Stream{ Labels: ls, Entries: []logproto.Entry{{Line: ""}}, @@ -124,12 +132,13 @@ func (m *Manager) UpdateMetastore(ctx context.Context, flushResult dataobj.Flush return nil, err } - newMetastore, err := m.metastoreBuilder.FlushToBuffer() + m.buf.Reset() + _, err = m.metastoreBuilder.Flush(m.buf) if err != nil { return nil, err } - m.metrics.observeMetastoreEncoding(encodingStart) - return newMetastore, nil + encodingDuration.ObserveDuration() + return m.buf, nil }) if err == nil { level.Info(m.logger).Log("msg", "successfully merged & updated metastore", "metastore", metastorePath) @@ -145,6 +154,7 @@ func (m *Manager) UpdateMetastore(ctx context.Context, flushResult dataobj.Flush return err } +// readFromExisting reads the provided metastore object and appends the streams to the builder so it can be later modified. 
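// Entries already stored in the object are replayed into the freshly reset builder before the new
// dataobj entry is appended, so each rewrite preserves everything the metastore object contained.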
func (m *Manager) readFromExisting(ctx context.Context, object *dataobj.Object) error { // Fetch sections si, err := object.Metadata(ctx) diff --git a/pkg/dataobj/metastore/metastore_test.go b/pkg/dataobj/metastore/metastore_test.go index 582882917b5e9..363e99a6b8b56 100644 --- a/pkg/dataobj/metastore/metastore_test.go +++ b/pkg/dataobj/metastore/metastore_test.go @@ -2,12 +2,10 @@ package metastore import ( "context" - "fmt" "testing" "time" "github.com/go-kit/log" - "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/require" "github.com/grafana/dskit/backoff" @@ -21,8 +19,7 @@ func BenchmarkWriteMetastores(t *testing.B) { bucket := objstore.NewInMemBucket() tenantID := "test-tenant" - m, err := NewMetastoreManager(bucket, tenantID, log.NewNopLogger(), prometheus.DefaultRegisterer) - require.NoError(t, err) + m := NewManager(bucket, tenantID, log.NewNopLogger()) // Set limits for the test m.backoff = backoff.New(context.TODO(), backoff.Config{ @@ -34,10 +31,9 @@ func BenchmarkWriteMetastores(t *testing.B) { // Add test data spanning multiple metastore windows now := time.Date(2025, 1, 1, 15, 0, 0, 0, time.UTC) - flushResults := make([]dataobj.FlushResult, 1000) + flushStats := make([]dataobj.FlushStats, 1000) for i := 0; i < 1000; i++ { - flushResults[i] = dataobj.FlushResult{ - Path: fmt.Sprintf("test-dataobj-path-%d", i), + flushStats[i] = dataobj.FlushStats{ MinTimestamp: now.Add(-1 * time.Hour).Add(time.Duration(i) * time.Millisecond), MaxTimestamp: now, } @@ -47,7 +43,7 @@ func BenchmarkWriteMetastores(t *testing.B) { t.ReportAllocs() for i := 0; i < t.N; i++ { // Test writing metastores - err = m.UpdateMetastore(ctx, flushResults[i%len(flushResults)]) + err := m.UpdateMetastore(ctx, "path", flushStats[i%len(flushStats)]) require.NoError(t, err) } @@ -59,8 +55,7 @@ func TestWriteMetastores(t *testing.T) { bucket := objstore.NewInMemBucket() tenantID := "test-tenant" - m, err := NewMetastoreManager(bucket, tenantID, log.NewNopLogger(), prometheus.DefaultRegisterer) - require.NoError(t, err) + m := NewManager(bucket, tenantID, log.NewNopLogger()) // Set limits for the test m.backoff = backoff.New(context.TODO(), backoff.Config{ @@ -72,8 +67,7 @@ func TestWriteMetastores(t *testing.T) { // Add test data spanning multiple metastore windows now := time.Date(2025, 1, 1, 15, 0, 0, 0, time.UTC) - flushResult := dataobj.FlushResult{ - Path: "test-dataobj-path", + flushStats := dataobj.FlushStats{ MinTimestamp: now.Add(-1 * time.Hour), MaxTimestamp: now, } @@ -81,7 +75,7 @@ func TestWriteMetastores(t *testing.T) { require.Len(t, bucket.Objects(), 0) // Test writing metastores - err = m.UpdateMetastore(ctx, flushResult) + err := m.UpdateMetastore(ctx, "test-dataobj-path", flushStats) require.NoError(t, err) require.Len(t, bucket.Objects(), 1) @@ -90,13 +84,12 @@ func TestWriteMetastores(t *testing.T) { originalSize = len(obj) } - flushResult2 := dataobj.FlushResult{ - Path: "different-test-dataobj-path", + flushResult2 := dataobj.FlushStats{ MinTimestamp: now.Add(-15 * time.Minute), MaxTimestamp: now, } - err = m.UpdateMetastore(ctx, flushResult2) + err = m.UpdateMetastore(ctx, "different-dataobj-path", flushResult2) require.NoError(t, err) require.Len(t, bucket.Objects(), 1) diff --git a/pkg/dataobj/metrics.go b/pkg/dataobj/metrics.go index fa18d5159d0b2..9a029363e56da 100644 --- a/pkg/dataobj/metrics.go +++ b/pkg/dataobj/metrics.go @@ -16,13 +16,12 @@ type metrics struct { streams *streams.Metrics encoding *encoding.Metrics - shaPrefixSize prometheus.Gauge 
targetPageSize prometheus.Gauge targetObjectSize prometheus.Gauge - appendTime prometheus.Histogram - buildTime prometheus.Histogram - flushTime prometheus.Histogram + appendTime prometheus.Histogram + buildTime prometheus.Histogram + flushFailures prometheus.Counter sizeEstimate prometheus.Gauge builtSize prometheus.Histogram @@ -34,15 +33,6 @@ func newMetrics() *metrics { logs: logs.NewMetrics(), streams: streams.NewMetrics(), encoding: encoding.NewMetrics(), - - shaPrefixSize: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "loki", - Subsystem: "dataobj", - Name: "config_sha_prefix_size", - - Help: "Configured SHA prefix size.", - }), - targetPageSize: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: "loki", Subsystem: "dataobj", @@ -85,19 +75,6 @@ func newMetrics() *metrics { NativeHistogramMinResetDuration: 0, }), - flushTime: prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: "loki", - Subsystem: "dataobj", - Name: "flush_time_seconds", - - Help: "Time taken flushing data objects to object storage.", - - Buckets: prometheus.DefBuckets, - NativeHistogramBucketFactor: 1.1, - NativeHistogramMaxBucketNumber: 100, - NativeHistogramMinResetDuration: 0, - }), - sizeEstimate: prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: "loki", Subsystem: "dataobj", @@ -117,12 +94,19 @@ func newMetrics() *metrics { NativeHistogramMaxBucketNumber: 100, NativeHistogramMinResetDuration: 0, }), + + flushFailures: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: "loki", + Subsystem: "dataobj", + Name: "flush_failures_total", + + Help: "Total number of flush failures.", + }), } } // ObserveConfig updates config metrics based on the provided [BuilderConfig]. func (m *metrics) ObserveConfig(cfg BuilderConfig) { - m.shaPrefixSize.Set(float64(cfg.SHAPrefixSize)) m.targetPageSize.Set(float64(cfg.TargetPageSize)) m.targetObjectSize.Set(float64(cfg.TargetObjectSize)) } @@ -135,16 +119,15 @@ func (m *metrics) Register(reg prometheus.Registerer) error { errs = append(errs, m.streams.Register(reg)) errs = append(errs, m.encoding.Register(reg)) - errs = append(errs, reg.Register(m.shaPrefixSize)) errs = append(errs, reg.Register(m.targetPageSize)) errs = append(errs, reg.Register(m.targetObjectSize)) errs = append(errs, reg.Register(m.appendTime)) errs = append(errs, reg.Register(m.buildTime)) - errs = append(errs, reg.Register(m.flushTime)) errs = append(errs, reg.Register(m.sizeEstimate)) errs = append(errs, reg.Register(m.builtSize)) + errs = append(errs, reg.Register(m.flushFailures)) return errors.Join(errs...) 
} @@ -155,14 +138,13 @@ func (m *metrics) Unregister(reg prometheus.Registerer) { m.streams.Unregister(reg) m.encoding.Unregister(reg) - reg.Unregister(m.shaPrefixSize) reg.Unregister(m.targetPageSize) reg.Unregister(m.targetObjectSize) reg.Unregister(m.appendTime) reg.Unregister(m.buildTime) - reg.Unregister(m.flushTime) reg.Unregister(m.sizeEstimate) reg.Unregister(m.builtSize) + reg.Unregister(m.flushFailures) } diff --git a/pkg/dataobj/uploader/metrics.go b/pkg/dataobj/uploader/metrics.go new file mode 100644 index 0000000000000..49a1d7dc82d94 --- /dev/null +++ b/pkg/dataobj/uploader/metrics.go @@ -0,0 +1,63 @@ +package uploader + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +type metrics struct { + uploadTime prometheus.Histogram + uploadFailures prometheus.Counter + shaPrefixSize prometheus.Gauge +} + +func newMetrics(shaPrefixSize int) *metrics { + metrics := &metrics{ + uploadFailures: prometheus.NewCounter(prometheus.CounterOpts{ + Name: "loki_dataobj_consumer_upload_failures_total", + Help: "Total number of upload failures", + }), + uploadTime: prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: "loki", + Subsystem: "dataobj", + Name: "upload_time_seconds", + Help: "Time taken writing data objects to object storage.", + Buckets: prometheus.DefBuckets, + }), + shaPrefixSize: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "loki", + Subsystem: "dataobj", + Name: "sha_prefix_size", + Help: "The size of the SHA prefix used for object storage keys.", + }), + } + + metrics.shaPrefixSize.Set(float64(shaPrefixSize)) + return metrics +} + +func (m *metrics) register(reg prometheus.Registerer) error { + collectors := []prometheus.Collector{ + m.uploadFailures, + m.uploadTime, + } + + for _, collector := range collectors { + if err := reg.Register(collector); err != nil { + if _, ok := err.(prometheus.AlreadyRegisteredError); !ok { + return err + } + } + } + return nil +} + +func (m *metrics) unregister(reg prometheus.Registerer) { + collectors := []prometheus.Collector{ + m.uploadFailures, + m.uploadTime, + } + + for _, collector := range collectors { + reg.Unregister(collector) + } +} diff --git a/pkg/dataobj/uploader/uploader.go b/pkg/dataobj/uploader/uploader.go new file mode 100644 index 0000000000000..b19a8699b82aa --- /dev/null +++ b/pkg/dataobj/uploader/uploader.go @@ -0,0 +1,93 @@ +package uploader + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/hex" + "flag" + "fmt" + "time" + + "github.com/grafana/dskit/backoff" + "github.com/prometheus/client_golang/prometheus" + "github.com/thanos-io/objstore" +) + +type Config struct { + // SHAPrefixSize is the size of the SHA prefix used for splitting object storage keys + SHAPrefixSize int +} + +// RegisterFlagsWithPrefix registers flags with the given prefix. 
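// For example, with a hypothetical prefix of "dataobj-consumer.uploader." the single registered
// flag would be -dataobj-consumer.uploader.sha-prefix-size, defaulting to 2.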
+func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + f.IntVar(&cfg.SHAPrefixSize, prefix+"sha-prefix-size", 2, "The size of the SHA prefix to use for generating object storage keys for data objects.") +} + +func (cfg *Config) Validate() error { + if cfg.SHAPrefixSize <= 0 { + return fmt.Errorf("SHAPrefixSize must be greater than 0") + } + return nil +} + +type Uploader struct { + SHAPrefixSize int + bucket objstore.Bucket + tenantID string + metrics *metrics +} + +func New(cfg Config, bucket objstore.Bucket, tenantID string) *Uploader { + metrics := newMetrics(cfg.SHAPrefixSize) + + return &Uploader{ + SHAPrefixSize: cfg.SHAPrefixSize, + bucket: bucket, + tenantID: tenantID, + metrics: metrics, + } +} + +func (d *Uploader) RegisterMetrics(reg prometheus.Registerer) error { + return d.metrics.register(reg) +} + +func (d *Uploader) UnregisterMetrics(reg prometheus.Registerer) { + d.metrics.unregister(reg) +} + +// getKey determines the key in object storage to upload the object to, based on our path scheme. +func (d *Uploader) getKey(object *bytes.Buffer) string { + sum := sha256.Sum224(object.Bytes()) + sumStr := hex.EncodeToString(sum[:]) + + return fmt.Sprintf("tenant-%s/objects/%s/%s", d.tenantID, sumStr[:d.SHAPrefixSize], sumStr[d.SHAPrefixSize:]) +} + +// Upload uploads an object to the configured bucket and returns the key. +func (d *Uploader) Upload(ctx context.Context, object *bytes.Buffer) (string, error) { + timer := prometheus.NewTimer(d.metrics.uploadTime) + defer timer.ObserveDuration() + + objectPath := d.getKey(object) + + backoff := backoff.New(ctx, backoff.Config{ + MinBackoff: 100 * time.Millisecond, + MaxBackoff: 10 * time.Second, + MaxRetries: 20, + }) + + var lastErr error + for backoff.Ongoing() { + err := d.bucket.Upload(ctx, objectPath, bytes.NewReader(object.Bytes())) + if err == nil { + return objectPath, nil + } + lastErr = err + backoff.Wait() + } + + d.metrics.uploadFailures.Inc() + return "", fmt.Errorf("uploading object after %d retries: %w", backoff.NumRetries(), lastErr) +} diff --git a/vendor/github.com/thanos-io/objstore/providers/filesystem/filesystem.go b/vendor/github.com/thanos-io/objstore/providers/filesystem/filesystem.go index 920f3ace74715..f6538b894edb4 100644 --- a/vendor/github.com/thanos-io/objstore/providers/filesystem/filesystem.go +++ b/vendor/github.com/thanos-io/objstore/providers/filesystem/filesystem.go @@ -4,6 +4,7 @@ package filesystem import ( + "bytes" "context" "fmt" "io" @@ -292,7 +293,7 @@ func (b *Bucket) GetAndReplace(ctx context.Context, name string, f func(io.Reade defer r.Close() } - newContent, err := f(r) + newContent, err := f(wrapReader(r)) if err != nil { return err } @@ -305,6 +306,13 @@ func (b *Bucket) GetAndReplace(ctx context.Context, name string, f func(io.Reade return os.WriteFile(file, content, 0600) } +func wrapReader(r io.Reader) io.Reader { + if r == nil { + return bytes.NewReader(nil) + } + return r +} + func isDirEmpty(name string) (ok bool, err error) { f, err := os.Open(filepath.Clean(name)) if os.IsNotExist(err) { diff --git a/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go b/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go index cd9105ca31c1b..ee702963190e4 100644 --- a/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go +++ b/vendor/github.com/thanos-io/objstore/providers/gcs/gcs.go @@ -395,9 +395,9 @@ func (b *Bucket) GetAndReplace(ctx context.Context, name string, f func(io.Reade return b.upload(ctx, name, newContent, generation, mustNotExist) } 
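// wrapReader returns an empty reader when the underlying *storage.Reader is nil (no existing
// object), so callers such as GetAndReplace can hand the callback a usable io.Reader.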
-func wrapReader(r *storage.Reader) io.ReadCloser { +func wrapReader(r *storage.Reader) io.Reader { if r == nil { - return io.NopCloser(bytes.NewReader(nil)) + return bytes.NewReader(nil) } return r diff --git a/vendor/modules.txt b/vendor/modules.txt index a135d97391855..92cceb83f10b3 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1633,7 +1633,7 @@ github.com/stretchr/testify/assert/yaml github.com/stretchr/testify/mock github.com/stretchr/testify/require github.com/stretchr/testify/suite -# github.com/thanos-io/objstore v0.0.0-20250115091151-a54d0f04b42a => github.com/grafana/objstore v0.0.0-20250128154815-d7e99f81f866 +# github.com/thanos-io/objstore v0.0.0-20250115091151-a54d0f04b42a => github.com/grafana/objstore v0.0.0-20250203161329-90e33e9afde6 ## explicit; go 1.22 github.com/thanos-io/objstore github.com/thanos-io/objstore/clientutil @@ -2562,4 +2562,4 @@ sigs.k8s.io/yaml/goyaml.v2 # github.com/grafana/regexp => github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc # github.com/grafana/loki/pkg/push => ./pkg/push # github.com/influxdata/go-syslog/v3 => github.com/leodido/go-syslog/v4 v4.2.0 -# github.com/thanos-io/objstore => github.com/grafana/objstore v0.0.0-20250128154815-d7e99f81f866 +# github.com/thanos-io/objstore => github.com/grafana/objstore v0.0.0-20250203161329-90e33e9afde6 From 6d49d911dc4bd802f350a99f5598c903cf236a90 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 4 Feb 2025 15:51:51 +0000 Subject: [PATCH 18/33] fix(deps): update module golang.org/x/sys to v0.30.0 (main) (#16087) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 +- vendor/golang.org/x/sys/cpu/cpu.go | 3 + vendor/golang.org/x/sys/cpu/cpu_x86.go | 21 +++- vendor/golang.org/x/sys/unix/auxv.go | 36 ++++++ .../golang.org/x/sys/unix/auxv_unsupported.go | 13 ++ .../golang.org/x/sys/unix/syscall_solaris.go | 87 +++++++++++++ vendor/golang.org/x/sys/unix/zerrors_linux.go | 20 ++- .../x/sys/unix/zerrors_linux_386.go | 3 + .../x/sys/unix/zerrors_linux_amd64.go | 3 + .../x/sys/unix/zerrors_linux_arm.go | 3 + .../x/sys/unix/zerrors_linux_arm64.go | 4 + .../x/sys/unix/zerrors_linux_loong64.go | 3 + .../x/sys/unix/zerrors_linux_mips.go | 3 + .../x/sys/unix/zerrors_linux_mips64.go | 3 + .../x/sys/unix/zerrors_linux_mips64le.go | 3 + .../x/sys/unix/zerrors_linux_mipsle.go | 3 + .../x/sys/unix/zerrors_linux_ppc.go | 3 + .../x/sys/unix/zerrors_linux_ppc64.go | 3 + .../x/sys/unix/zerrors_linux_ppc64le.go | 3 + .../x/sys/unix/zerrors_linux_riscv64.go | 3 + .../x/sys/unix/zerrors_linux_s390x.go | 3 + .../x/sys/unix/zerrors_linux_sparc64.go | 3 + .../x/sys/unix/zsyscall_solaris_amd64.go | 114 ++++++++++++++++++ .../x/sys/unix/zsysnum_linux_386.go | 4 + .../x/sys/unix/zsysnum_linux_amd64.go | 4 + .../x/sys/unix/zsysnum_linux_arm.go | 4 + .../x/sys/unix/zsysnum_linux_arm64.go | 4 + .../x/sys/unix/zsysnum_linux_loong64.go | 4 + .../x/sys/unix/zsysnum_linux_mips.go | 4 + .../x/sys/unix/zsysnum_linux_mips64.go | 4 + .../x/sys/unix/zsysnum_linux_mips64le.go | 4 + .../x/sys/unix/zsysnum_linux_mipsle.go | 4 + .../x/sys/unix/zsysnum_linux_ppc.go | 4 + .../x/sys/unix/zsysnum_linux_ppc64.go | 4 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 4 + .../x/sys/unix/zsysnum_linux_riscv64.go | 4 + .../x/sys/unix/zsysnum_linux_s390x.go | 4 + .../x/sys/unix/zsysnum_linux_sparc64.go | 4 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 6 +- vendor/modules.txt | 2 +- 41 files changed, 403 insertions(+), 11 
deletions(-) create mode 100644 vendor/golang.org/x/sys/unix/auxv.go create mode 100644 vendor/golang.org/x/sys/unix/auxv_unsupported.go diff --git a/go.mod b/go.mod index 2ae12f5d18c2e..aaf1cc9a29a46 100644 --- a/go.mod +++ b/go.mod @@ -101,7 +101,7 @@ require ( golang.org/x/crypto v0.32.0 golang.org/x/net v0.34.0 golang.org/x/sync v0.11.0 - golang.org/x/sys v0.29.0 + golang.org/x/sys v0.30.0 golang.org/x/time v0.9.0 google.golang.org/api v0.219.0 google.golang.org/grpc v1.70.0 diff --git a/go.sum b/go.sum index 0e1a136281545..aaca3e8d03276 100644 --- a/go.sum +++ b/go.sum @@ -1458,8 +1458,8 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= -golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= +golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index 02609d5b21d56..9c105f23afcdc 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -72,6 +72,9 @@ var X86 struct { HasSSSE3 bool // Supplemental streaming SIMD extension 3 HasSSE41 bool // Streaming SIMD extension 4 and 4.1 HasSSE42 bool // Streaming SIMD extension 4 and 4.2 + HasAVXIFMA bool // Advanced vector extension Integer Fused Multiply Add + HasAVXVNNI bool // Advanced vector extension Vector Neural Network Instructions + HasAVXVNNIInt8 bool // Advanced vector extension Vector Neural Network Int8 instructions _ CacheLinePad } diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go index 600a6807861e7..1e642f3304fa8 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -53,6 +53,9 @@ func initOptions() { {Name: "sse41", Feature: &X86.HasSSE41}, {Name: "sse42", Feature: &X86.HasSSE42}, {Name: "ssse3", Feature: &X86.HasSSSE3}, + {Name: "avxifma", Feature: &X86.HasAVXIFMA}, + {Name: "avxvnni", Feature: &X86.HasAVXVNNI}, + {Name: "avxvnniint8", Feature: &X86.HasAVXVNNIInt8}, // These capabilities should always be enabled on amd64: {Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"}, @@ -106,7 +109,7 @@ func archInit() { return } - _, ebx7, ecx7, edx7 := cpuid(7, 0) + eax7, ebx7, ecx7, edx7 := cpuid(7, 0) X86.HasBMI1 = isSet(3, ebx7) X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX X86.HasBMI2 = isSet(8, ebx7) @@ -134,14 +137,24 @@ func archInit() { X86.HasAVX512VAES = isSet(9, ecx7) X86.HasAVX512VBMI2 = isSet(6, ecx7) X86.HasAVX512BITALG = isSet(12, ecx7) - - eax71, _, _, _ := cpuid(7, 1) - X86.HasAVX512BF16 = isSet(5, eax71) } X86.HasAMXTile = isSet(24, edx7) X86.HasAMXInt8 = isSet(25, edx7) X86.HasAMXBF16 = isSet(22, edx7) + + // These features depend on the second level of extended features. 
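	// cpuid(7, 0) reports the highest supported sub-leaf of leaf 7 in EAX, so sub-leaf 1 is only
	// queried when eax7 indicates it exists; otherwise these feature bits simply remain false.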
+ if eax7 >= 1 { + eax71, _, _, edx71 := cpuid(7, 1) + if X86.HasAVX512 { + X86.HasAVX512BF16 = isSet(5, eax71) + } + if X86.HasAVX { + X86.HasAVXIFMA = isSet(23, eax71) + X86.HasAVXVNNI = isSet(4, eax71) + X86.HasAVXVNNIInt8 = isSet(4, edx71) + } + } } func isSet(bitpos uint, value uint32) bool { diff --git a/vendor/golang.org/x/sys/unix/auxv.go b/vendor/golang.org/x/sys/unix/auxv.go new file mode 100644 index 0000000000000..37a82528f580f --- /dev/null +++ b/vendor/golang.org/x/sys/unix/auxv.go @@ -0,0 +1,36 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) + +package unix + +import ( + "syscall" + "unsafe" +) + +//go:linkname runtime_getAuxv runtime.getAuxv +func runtime_getAuxv() []uintptr + +// Auxv returns the ELF auxiliary vector as a sequence of key/value pairs. +// The returned slice is always a fresh copy, owned by the caller. +// It returns an error on non-ELF platforms, or if the auxiliary vector cannot be accessed, +// which happens in some locked-down environments and build modes. +func Auxv() ([][2]uintptr, error) { + vec := runtime_getAuxv() + vecLen := len(vec) + + if vecLen == 0 { + return nil, syscall.ENOENT + } + + if vecLen%2 != 0 { + return nil, syscall.EINVAL + } + + result := make([]uintptr, vecLen) + copy(result, vec) + return unsafe.Slice((*[2]uintptr)(unsafe.Pointer(&result[0])), vecLen/2), nil +} diff --git a/vendor/golang.org/x/sys/unix/auxv_unsupported.go b/vendor/golang.org/x/sys/unix/auxv_unsupported.go new file mode 100644 index 0000000000000..1200487f2e86c --- /dev/null +++ b/vendor/golang.org/x/sys/unix/auxv_unsupported.go @@ -0,0 +1,13 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) + +package unix + +import "syscall" + +func Auxv() ([][2]uintptr, error) { + return nil, syscall.ENOTSUP +} diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 21974af064ddc..abc3955477c7d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -1102,3 +1102,90 @@ func (s *Strioctl) SetInt(i int) { func IoctlSetStrioctlRetInt(fd int, req int, s *Strioctl) (int, error) { return ioctlPtrRet(fd, req, unsafe.Pointer(s)) } + +// Ucred Helpers +// See ucred(3c) and getpeerucred(3c) + +//sys getpeerucred(fd uintptr, ucred *uintptr) (err error) +//sys ucredFree(ucred uintptr) = ucred_free +//sys ucredGet(pid int) (ucred uintptr, err error) = ucred_get +//sys ucredGeteuid(ucred uintptr) (uid int) = ucred_geteuid +//sys ucredGetegid(ucred uintptr) (gid int) = ucred_getegid +//sys ucredGetruid(ucred uintptr) (uid int) = ucred_getruid +//sys ucredGetrgid(ucred uintptr) (gid int) = ucred_getrgid +//sys ucredGetsuid(ucred uintptr) (uid int) = ucred_getsuid +//sys ucredGetsgid(ucred uintptr) (gid int) = ucred_getsgid +//sys ucredGetpid(ucred uintptr) (pid int) = ucred_getpid + +// Ucred is an opaque struct that holds user credentials. +type Ucred struct { + ucred uintptr +} + +// We need to ensure that ucredFree is called on the underlying ucred +// when the Ucred is garbage collected. 
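// The ucred_t handle returned by getpeerucred(3C) and ucred_get(3C) is allocated by libc and must
// be released with ucred_free(3C); the finalizer frees it once the Go wrapper becomes unreachable.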
+func ucredFinalizer(u *Ucred) { + ucredFree(u.ucred) +} + +func GetPeerUcred(fd uintptr) (*Ucred, error) { + var ucred uintptr + err := getpeerucred(fd, &ucred) + if err != nil { + return nil, err + } + result := &Ucred{ + ucred: ucred, + } + // set the finalizer on the result so that the ucred will be freed + runtime.SetFinalizer(result, ucredFinalizer) + return result, nil +} + +func UcredGet(pid int) (*Ucred, error) { + ucred, err := ucredGet(pid) + if err != nil { + return nil, err + } + result := &Ucred{ + ucred: ucred, + } + // set the finalizer on the result so that the ucred will be freed + runtime.SetFinalizer(result, ucredFinalizer) + return result, nil +} + +func (u *Ucred) Geteuid() int { + defer runtime.KeepAlive(u) + return ucredGeteuid(u.ucred) +} + +func (u *Ucred) Getruid() int { + defer runtime.KeepAlive(u) + return ucredGetruid(u.ucred) +} + +func (u *Ucred) Getsuid() int { + defer runtime.KeepAlive(u) + return ucredGetsuid(u.ucred) +} + +func (u *Ucred) Getegid() int { + defer runtime.KeepAlive(u) + return ucredGetegid(u.ucred) +} + +func (u *Ucred) Getrgid() int { + defer runtime.KeepAlive(u) + return ucredGetrgid(u.ucred) +} + +func (u *Ucred) Getsgid() int { + defer runtime.KeepAlive(u) + return ucredGetsgid(u.ucred) +} + +func (u *Ucred) Getpid() int { + defer runtime.KeepAlive(u) + return ucredGetpid(u.ucred) +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 6ebc48b3fecd7..4f432bfe8feee 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1245,6 +1245,7 @@ const ( FAN_REPORT_DFID_NAME = 0xc00 FAN_REPORT_DFID_NAME_TARGET = 0x1e00 FAN_REPORT_DIR_FID = 0x400 + FAN_REPORT_FD_ERROR = 0x2000 FAN_REPORT_FID = 0x200 FAN_REPORT_NAME = 0x800 FAN_REPORT_PIDFD = 0x80 @@ -1330,8 +1331,10 @@ const ( FUSE_SUPER_MAGIC = 0x65735546 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 + F_CREATED_QUERY = 0x404 F_DUPFD = 0x0 F_DUPFD_CLOEXEC = 0x406 + F_DUPFD_QUERY = 0x403 F_EXLCK = 0x4 F_GETFD = 0x1 F_GETFL = 0x3 @@ -1551,6 +1554,7 @@ const ( IPPROTO_ROUTING = 0x2b IPPROTO_RSVP = 0x2e IPPROTO_SCTP = 0x84 + IPPROTO_SMC = 0x100 IPPROTO_TCP = 0x6 IPPROTO_TP = 0x1d IPPROTO_UDP = 0x11 @@ -1623,6 +1627,8 @@ const ( IPV6_UNICAST_IF = 0x4c IPV6_USER_FLOW = 0xe IPV6_V6ONLY = 0x1a + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 IPV6_XFRM_POLICY = 0x23 IP_ADD_MEMBERSHIP = 0x23 IP_ADD_SOURCE_MEMBERSHIP = 0x27 @@ -1867,6 +1873,7 @@ const ( MADV_UNMERGEABLE = 0xd MADV_WILLNEED = 0x3 MADV_WIPEONFORK = 0x12 + MAP_DROPPABLE = 0x8 MAP_FILE = 0x0 MAP_FIXED = 0x10 MAP_FIXED_NOREPLACE = 0x100000 @@ -1967,6 +1974,7 @@ const ( MSG_PEEK = 0x2 MSG_PROXY = 0x10 MSG_RST = 0x1000 + MSG_SOCK_DEVMEM = 0x2000000 MSG_SYN = 0x400 MSG_TRUNC = 0x20 MSG_TRYHARD = 0x4 @@ -2083,6 +2091,7 @@ const ( NFC_ATR_REQ_MAXSIZE = 0x40 NFC_ATR_RES_GB_MAXSIZE = 0x2f NFC_ATR_RES_MAXSIZE = 0x40 + NFC_ATS_MAXSIZE = 0x14 NFC_COMM_ACTIVE = 0x0 NFC_COMM_PASSIVE = 0x1 NFC_DEVICE_NAME_MAXSIZE = 0x8 @@ -2163,6 +2172,7 @@ const ( NFNL_SUBSYS_QUEUE = 0x3 NFNL_SUBSYS_ULOG = 0x4 NFS_SUPER_MAGIC = 0x6969 + NFT_BITWISE_BOOL = 0x0 NFT_CHAIN_FLAGS = 0x7 NFT_CHAIN_MAXNAMELEN = 0x100 NFT_CT_MAX = 0x17 @@ -2491,6 +2501,7 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b + PR_GET_SHADOW_STACK_STATUS = 0x4a PR_GET_SPECULATION_CTRL = 0x34 PR_GET_TAGGED_ADDR_CTRL = 0x38 PR_GET_THP_DISABLE = 0x2a @@ -2499,6 +2510,7 @@ const ( PR_GET_TIMING = 0xd PR_GET_TSC = 0x19 PR_GET_UNALIGN = 0x5 + 
PR_LOCK_SHADOW_STACK_STATUS = 0x4c PR_MCE_KILL = 0x21 PR_MCE_KILL_CLEAR = 0x0 PR_MCE_KILL_DEFAULT = 0x2 @@ -2525,6 +2537,8 @@ const ( PR_PAC_GET_ENABLED_KEYS = 0x3d PR_PAC_RESET_KEYS = 0x36 PR_PAC_SET_ENABLED_KEYS = 0x3c + PR_PMLEN_MASK = 0x7f000000 + PR_PMLEN_SHIFT = 0x18 PR_PPC_DEXCR_CTRL_CLEAR = 0x4 PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC = 0x10 PR_PPC_DEXCR_CTRL_EDITABLE = 0x1 @@ -2592,6 +2606,7 @@ const ( PR_SET_PTRACER = 0x59616d61 PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c + PR_SET_SHADOW_STACK_STATUS = 0x4b PR_SET_SPECULATION_CTRL = 0x35 PR_SET_SYSCALL_USER_DISPATCH = 0x3b PR_SET_TAGGED_ADDR_CTRL = 0x37 @@ -2602,6 +2617,9 @@ const ( PR_SET_UNALIGN = 0x6 PR_SET_VMA = 0x53564d41 PR_SET_VMA_ANON_NAME = 0x0 + PR_SHADOW_STACK_ENABLE = 0x1 + PR_SHADOW_STACK_PUSH = 0x4 + PR_SHADOW_STACK_WRITE = 0x2 PR_SME_GET_VL = 0x40 PR_SME_SET_VL = 0x3f PR_SME_SET_VL_ONEXEC = 0x40000 @@ -2911,7 +2929,6 @@ const ( RTM_NEWNEXTHOP = 0x68 RTM_NEWNEXTHOPBUCKET = 0x74 RTM_NEWNSID = 0x58 - RTM_NEWNVLAN = 0x70 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 RTM_NEWROUTE = 0x18 @@ -2920,6 +2937,7 @@ const ( RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c RTM_NEWTUNNEL = 0x78 + RTM_NEWVLAN = 0x70 RTM_NR_FAMILIES = 0x1b RTM_NR_MSGTYPES = 0x6c RTM_SETDCB = 0x4f diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index c0d45e320505f..75207613c785d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -116,6 +116,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -304,6 +306,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index c731d24f02529..c68acda53522d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -116,6 +116,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -305,6 +307,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 680018a4a7c9f..a8c607ab86b51 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -310,6 +312,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index a63909f308d6d..18563dd8d33a0 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -109,6 +109,7 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + GCS_MAGIC = 0x47435300 HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 @@ -119,6 +120,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -302,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 9b0a2573fe3fb..22912cdaa9448 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -116,6 +116,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -297,6 +299,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 958e6e0645acd..29344eb37ab55 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -303,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 50c7f25bd16c6..20d51fb96a897 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -303,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index ced21d66d955a..321b60902ae5c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -303,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d 
SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 226c044190235..9bacdf1e27910 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -303,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 3122737cd464f..c2242726156a9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -358,6 +360,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index eb5d3467edf0c..6270c8ee13e3f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -362,6 +364,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index e921ebc60b714..9966c1941f830 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -362,6 +364,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 38ba81c55c1fd..848e5fcc42e6f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -294,6 +296,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 
+ SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 71f0400977b36..669b2adb80b77 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -366,6 +368,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index c44a313322c54..4834e57514e44 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -119,6 +119,8 @@ const ( IN_CLOEXEC = 0x400000 IN_NONBLOCK = 0x4000 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -357,6 +359,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x38 SCM_TIMESTAMPING_PKTINFO = 0x3c SCM_TIMESTAMPNS = 0x21 + SCM_TS_OPT_ID = 0x5a SCM_TXTIME = 0x3f SCM_WIFI_STATUS = 0x25 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 829b87feb8da6..c6545413c45b4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -141,6 +141,16 @@ import ( //go:cgo_import_dynamic libc_getpeername getpeername "libsocket.so" //go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so" //go:cgo_import_dynamic libc_recvfrom recvfrom "libsocket.so" +//go:cgo_import_dynamic libc_getpeerucred getpeerucred "libc.so" +//go:cgo_import_dynamic libc_ucred_get ucred_get "libc.so" +//go:cgo_import_dynamic libc_ucred_geteuid ucred_geteuid "libc.so" +//go:cgo_import_dynamic libc_ucred_getegid ucred_getegid "libc.so" +//go:cgo_import_dynamic libc_ucred_getruid ucred_getruid "libc.so" +//go:cgo_import_dynamic libc_ucred_getrgid ucred_getrgid "libc.so" +//go:cgo_import_dynamic libc_ucred_getsuid ucred_getsuid "libc.so" +//go:cgo_import_dynamic libc_ucred_getsgid ucred_getsgid "libc.so" +//go:cgo_import_dynamic libc_ucred_getpid ucred_getpid "libc.so" +//go:cgo_import_dynamic libc_ucred_free ucred_free "libc.so" //go:cgo_import_dynamic libc_port_create port_create "libc.so" //go:cgo_import_dynamic libc_port_associate port_associate "libc.so" //go:cgo_import_dynamic libc_port_dissociate port_dissociate "libc.so" @@ -280,6 +290,16 @@ import ( //go:linkname procgetpeername libc_getpeername //go:linkname procsetsockopt libc_setsockopt //go:linkname procrecvfrom libc_recvfrom +//go:linkname procgetpeerucred libc_getpeerucred +//go:linkname procucred_get libc_ucred_get +//go:linkname procucred_geteuid libc_ucred_geteuid +//go:linkname procucred_getegid libc_ucred_getegid +//go:linkname procucred_getruid libc_ucred_getruid +//go:linkname procucred_getrgid libc_ucred_getrgid +//go:linkname procucred_getsuid libc_ucred_getsuid +//go:linkname procucred_getsgid libc_ucred_getsgid +//go:linkname procucred_getpid libc_ucred_getpid +//go:linkname 
procucred_free libc_ucred_free //go:linkname procport_create libc_port_create //go:linkname procport_associate libc_port_associate //go:linkname procport_dissociate libc_port_dissociate @@ -420,6 +440,16 @@ var ( procgetpeername, procsetsockopt, procrecvfrom, + procgetpeerucred, + procucred_get, + procucred_geteuid, + procucred_getegid, + procucred_getruid, + procucred_getrgid, + procucred_getsuid, + procucred_getsgid, + procucred_getpid, + procucred_free, procport_create, procport_associate, procport_dissociate, @@ -2029,6 +2059,90 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getpeerucred(fd uintptr, ucred *uintptr) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetpeerucred)), 2, uintptr(fd), uintptr(unsafe.Pointer(ucred)), 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGet(pid int) (ucred uintptr, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procucred_get)), 1, uintptr(pid), 0, 0, 0, 0, 0) + ucred = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGeteuid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_geteuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetegid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getegid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetruid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getruid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetrgid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getrgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetsuid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetsgid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetpid(ucred uintptr) (pid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getpid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredFree(ucred uintptr) { + sysvicall6(uintptr(unsafe.Pointer(&procucred_free)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func port_create() (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_create)), 0, 0, 0, 0, 0, 0, 0) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 524b0820cbc2e..c79aaff306ae3 100644 --- 
a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -458,4 +458,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index f485dbf456567..5eb450695e95a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -381,4 +381,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 70b35bf3b09f6..05e5029744586 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -422,4 +422,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 1893e2fe88404..38c53ec51bb3e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -325,4 +325,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 16a4017da0ab2..31d2e71a18e17 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -321,4 +321,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 7e567f1efff21..f4184a336b0e0 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -442,4 +442,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 SYS_MSEAL = 4462 + SYS_SETXATTRAT = 4463 + SYS_GETXATTRAT = 4464 + SYS_LISTXATTRAT = 4465 + SYS_REMOVEXATTRAT = 4466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 38ae55e5ef856..05b9962278f27 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -372,4 +372,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 SYS_MSEAL = 5462 + SYS_SETXATTRAT = 5463 + SYS_GETXATTRAT = 5464 + SYS_LISTXATTRAT = 5465 + SYS_REMOVEXATTRAT = 5466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 55e92e60a82ab..43a256e9e6758 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -372,4 +372,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 SYS_MSEAL = 5462 
+ SYS_SETXATTRAT = 5463 + SYS_GETXATTRAT = 5464 + SYS_LISTXATTRAT = 5465 + SYS_REMOVEXATTRAT = 5466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 60658d6a021f6..eea5ddfc22077 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -442,4 +442,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 SYS_MSEAL = 4462 + SYS_SETXATTRAT = 4463 + SYS_GETXATTRAT = 4464 + SYS_LISTXATTRAT = 4465 + SYS_REMOVEXATTRAT = 4466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index e203e8a7ed4b2..0d777bfbb1408 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -449,4 +449,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index 5944b97d54604..b44636502561e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -421,4 +421,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index c66d416dad1cc..0c7d21c188165 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -421,4 +421,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index a5459e766f59d..8405391698787 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -326,4 +326,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 01d86825bb926..fcf1b790d6cfd 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -387,4 +387,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 7b703e77cda84..52d15b5f9d459 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -400,4 +400,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go 
index 5537148dcbb3d..a46abe6472054 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -4747,7 +4747,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x14c + NL80211_ATTR_MAX = 0x14d NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -5519,7 +5519,7 @@ const ( NL80211_MNTR_FLAG_CONTROL = 0x3 NL80211_MNTR_FLAG_COOK_FRAMES = 0x5 NL80211_MNTR_FLAG_FCSFAIL = 0x1 - NL80211_MNTR_FLAG_MAX = 0x6 + NL80211_MNTR_FLAG_MAX = 0x7 NL80211_MNTR_FLAG_OTHER_BSS = 0x4 NL80211_MNTR_FLAG_PLCPFAIL = 0x2 NL80211_MPATH_FLAG_ACTIVE = 0x1 @@ -6174,3 +6174,5 @@ type SockDiagReq struct { Family uint8 Protocol uint8 } + +const RTM_NEWNVLAN = 0x70 diff --git a/vendor/modules.txt b/vendor/modules.txt index 92cceb83f10b3..c72c711d622c4 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1951,7 +1951,7 @@ golang.org/x/oauth2/jwt golang.org/x/sync/errgroup golang.org/x/sync/semaphore golang.org/x/sync/singleflight -# golang.org/x/sys v0.29.0 +# golang.org/x/sys v0.30.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/plan9 From 66b2721cfa27f892abae0b8d024490cbeb77afa6 Mon Sep 17 00:00:00 2001 From: Robert Fratto Date: Tue, 4 Feb 2025 11:42:03 -0500 Subject: [PATCH 19/33] chore(dataobj): do not panic on unrecognized compression type (#16088) --- pkg/dataobj/internal/dataset/page.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/pkg/dataobj/internal/dataset/page.go b/pkg/dataobj/internal/dataset/page.go index c0b2653bbc65e..e1dff653b4fce 100644 --- a/pkg/dataobj/internal/dataset/page.go +++ b/pkg/dataobj/internal/dataset/page.go @@ -112,9 +112,12 @@ func (p *MemPage) reader(compression datasetmd.CompressionType) (presence io.Rea case datasetmd.COMPRESSION_TYPE_ZSTD: zr := &fixedZstdReader{page: p, data: compressedValuesData} return bitmapReader, zr, nil - } - panic(fmt.Sprintf("dataset.MemPage.reader: unknown compression type %q", compression.String())) + default: + // We do *not* want to panic here, as we may be trying to read a page from + // a newer format. 
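		// Returning an error lets the caller surface "unknown compression type" for objects written
		// by a newer format version instead of crashing the whole process.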
+ return nil, nil, fmt.Errorf("unknown compression type %q", compression.String()) + } } var snappyPool = sync.Pool{ From fe17cc0c370bc06c0dffb913936edb0d91e90fd7 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 4 Feb 2025 16:48:00 +0000 Subject: [PATCH 20/33] fix(deps): update module golang.org/x/time to v0.10.0 (main) (#16089) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- vendor/golang.org/x/time/rate/rate.go | 11 +++++++++-- vendor/modules.txt | 2 +- 4 files changed, 13 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index aaf1cc9a29a46..393dbae8b21b0 100644 --- a/go.mod +++ b/go.mod @@ -102,7 +102,7 @@ require ( golang.org/x/net v0.34.0 golang.org/x/sync v0.11.0 golang.org/x/sys v0.30.0 - golang.org/x/time v0.9.0 + golang.org/x/time v0.10.0 google.golang.org/api v0.219.0 google.golang.org/grpc v1.70.0 gopkg.in/yaml.v2 v2.4.0 diff --git a/go.sum b/go.sum index aaca3e8d03276..613468e3b8e1d 100644 --- a/go.sum +++ b/go.sum @@ -1485,8 +1485,8 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= -golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.10.0 h1:3usCWA8tQn0L8+hFJQNgzpWbd89begxN66o1Ojdn5L4= +golang.org/x/time v0.10.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index 93a798ab63704..ec5f0cdd0c0e6 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -405,8 +405,15 @@ func (limit Limit) durationFromTokens(tokens float64) time.Duration { if limit <= 0 { return InfDuration } - seconds := tokens / float64(limit) - return time.Duration(float64(time.Second) * seconds) + + duration := (tokens / float64(limit)) * float64(time.Second) + + // Cap the duration to the maximum representable int64 value, to avoid overflow. 
+ if duration > float64(math.MaxInt64) { + return InfDuration + } + + return time.Duration(duration) } // tokensFromDuration is a unit conversion function from a time duration to the number of tokens diff --git a/vendor/modules.txt b/vendor/modules.txt index c72c711d622c4..f289f463fe475 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1986,7 +1986,7 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.9.0 +# golang.org/x/time v0.10.0 ## explicit; go 1.18 golang.org/x/time/rate # golang.org/x/tools v0.28.0 From 8c2406e579aaa0e45f13f48de91f25586e477bc0 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 4 Feb 2025 17:26:37 +0000 Subject: [PATCH 21/33] fix(deps): update module golang.org/x/text to v0.22.0 (main) (#16090) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- vendor/modules.txt | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index 393dbae8b21b0..704546489b33c 100644 --- a/go.mod +++ b/go.mod @@ -146,7 +146,7 @@ require ( go.opentelemetry.io/collector/pdata v1.25.0 go4.org/netipx v0.0.0-20230125063823-8449b0a6169f golang.org/x/oauth2 v0.26.0 - golang.org/x/text v0.21.0 + golang.org/x/text v0.22.0 google.golang.org/protobuf v1.36.4 gotest.tools v2.2.0+incompatible k8s.io/apimachinery v0.32.1 diff --git a/go.sum b/go.sum index 613468e3b8e1d..2dc4062022a6a 100644 --- a/go.sum +++ b/go.sum @@ -1479,8 +1479,8 @@ golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= +golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= diff --git a/vendor/modules.txt b/vendor/modules.txt index f289f463fe475..8c63de7f6e657 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1962,7 +1962,7 @@ golang.org/x/sys/windows/svc/eventlog # golang.org/x/term v0.28.0 ## explicit; go 1.18 golang.org/x/term -# golang.org/x/text v0.21.0 +# golang.org/x/text v0.22.0 ## explicit; go 1.18 golang.org/x/text/cases golang.org/x/text/encoding From c4e916da86fbd4ab9b1ee72468c74c82dac253e6 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 4 Feb 2025 21:09:09 +0000 Subject: [PATCH 22/33] fix(deps): update module github.com/aws/aws-sdk-go-v2/config to v1.29.5 (main) (#16093) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- tools/lambda-promtail/go.mod | 6 +++--- tools/lambda-promtail/go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/tools/lambda-promtail/go.mod b/tools/lambda-promtail/go.mod index 
e28f71cc05f84..9a5862b1a9df5 100644 --- a/tools/lambda-promtail/go.mod +++ b/tools/lambda-promtail/go.mod @@ -7,7 +7,7 @@ toolchain go1.23.5 require ( github.com/aws/aws-lambda-go v1.47.0 github.com/aws/aws-sdk-go-v2 v1.36.0 - github.com/aws/aws-sdk-go-v2/config v1.29.4 + github.com/aws/aws-sdk-go-v2/config v1.29.5 github.com/aws/aws-sdk-go-v2/service/s3 v1.75.2 github.com/go-kit/log v0.2.1 github.com/gogo/protobuf v1.3.2 @@ -27,7 +27,7 @@ require ( github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 // indirect github.com/armon/go-metrics v0.4.1 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.17.57 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.58 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.31 // indirect @@ -39,7 +39,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.12 // indirect github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 // indirect github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.33.12 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.33.13 // indirect github.com/aws/smithy-go v1.22.2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500 // indirect diff --git a/tools/lambda-promtail/go.sum b/tools/lambda-promtail/go.sum index 39e2e96f5926d..7d58959430055 100644 --- a/tools/lambda-promtail/go.sum +++ b/tools/lambda-promtail/go.sum @@ -52,10 +52,10 @@ github.com/aws/aws-sdk-go-v2 v1.36.0 h1:b1wM5CcE65Ujwn565qcwgtOTT1aT4ADOHHgglKjG github.com/aws/aws-sdk-go-v2 v1.36.0/go.mod h1:5PMILGVKiW32oDzjj6RU52yrNrDPUHcbZQYr1sM7qmM= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8 h1:zAxi9p3wsZMIaVCdoiQp2uZ9k1LsZvmAnoTBeZPXom0= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.8/go.mod h1:3XkePX5dSaxveLAYY7nsbsZZrKxCyEuE5pM4ziFxyGg= -github.com/aws/aws-sdk-go-v2/config v1.29.4 h1:ObNqKsDYFGr2WxnoXKOhCvTlf3HhwtoGgc+KmZ4H5yg= -github.com/aws/aws-sdk-go-v2/config v1.29.4/go.mod h1:j2/AF7j/qxVmsNIChw1tWfsVKOayJoGRDjg1Tgq7NPk= -github.com/aws/aws-sdk-go-v2/credentials v1.17.57 h1:kFQDsbdBAR3GZsB8xA+51ptEnq9TIj3tS4MuP5b+TcQ= -github.com/aws/aws-sdk-go-v2/credentials v1.17.57/go.mod h1:2kerxPUUbTagAr/kkaHiqvj/bcYHzi2qiJS/ZinllU0= +github.com/aws/aws-sdk-go-v2/config v1.29.5 h1:4lS2IB+wwkj5J43Tq/AwvnscBerBJtQQ6YS7puzCI1k= +github.com/aws/aws-sdk-go-v2/config v1.29.5/go.mod h1:SNzldMlDVbN6nWxM7XsUiNXPSa1LWlqiXtvh/1PrJGg= +github.com/aws/aws-sdk-go-v2/credentials v1.17.58 h1:/d7FUpAPU8Lf2KUdjniQvfNdlMID0Sd9pS23FJ3SS9Y= +github.com/aws/aws-sdk-go-v2/credentials v1.17.58/go.mod h1:aVYW33Ow10CyMQGFgC0ptMRIqJWvJ4nxZb0sUiuQT/A= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27 h1:7lOW8NUwE9UZekS1DYoiPdVAqZ6A+LheHWb+mHbNOq8= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.27/go.mod h1:w1BASFIPOPUae7AgaH4SbjNbfdkxuggLyGfNFTn8ITY= github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.31 h1:lWm9ucLSRFiI4dQQafLrEOmEDGry3Swrz0BIRdiHJqQ= @@ -80,8 +80,8 @@ github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 h1:c5WJ3iHz7rLIgArznb3JCSQT3uU github.com/aws/aws-sdk-go-v2/service/sso v1.24.14/go.mod h1:+JJQTxB6N4niArC14YNtxcQtwEqzS3o9Z32n7q33Rfs= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 h1:f1L/JtUkVODD+k1+IiSJUUv8A++2qVr+Xvb3xWXETMU= 
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13/go.mod h1:tvqlFoja8/s0o+UruA1Nrezo/df0PzdunMDDurUfg6U= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.12 h1:fqg6c1KVrc3SYWma/egWue5rKI4G2+M4wMQN2JosNAA= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.12/go.mod h1:7Yn+p66q/jt38qMoVfNvjbm3D89mGBnkwDcijgtih8w= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.13 h1:3LXNnmtH3TURctC23hnC0p/39Q5gre3FI7BNOiDcVWc= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.13/go.mod h1:7Yn+p66q/jt38qMoVfNvjbm3D89mGBnkwDcijgtih8w= github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ= github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= From 1d6c0eba5160935ef34fe12ab7e2a7047894ef84 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Tue, 4 Feb 2025 21:30:46 +0000 Subject: [PATCH 23/33] fix(deps): update module github.com/aws/aws-sdk-go-v2/service/s3 to v1.75.3 (main) (#16094) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- tools/lambda-promtail/go.mod | 2 +- tools/lambda-promtail/go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/lambda-promtail/go.mod b/tools/lambda-promtail/go.mod index 9a5862b1a9df5..e763a3059dac0 100644 --- a/tools/lambda-promtail/go.mod +++ b/tools/lambda-promtail/go.mod @@ -8,7 +8,7 @@ require ( github.com/aws/aws-lambda-go v1.47.0 github.com/aws/aws-sdk-go-v2 v1.36.0 github.com/aws/aws-sdk-go-v2/config v1.29.5 - github.com/aws/aws-sdk-go-v2/service/s3 v1.75.2 + github.com/aws/aws-sdk-go-v2/service/s3 v1.75.3 github.com/go-kit/log v0.2.1 github.com/gogo/protobuf v1.3.2 github.com/golang/snappy v0.0.4 diff --git a/tools/lambda-promtail/go.sum b/tools/lambda-promtail/go.sum index 7d58959430055..231c27a5d55dc 100644 --- a/tools/lambda-promtail/go.sum +++ b/tools/lambda-promtail/go.sum @@ -74,8 +74,8 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12 h1:O+8vD2rG github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.12/go.mod h1:usVdWJaosa66NMvmCrr08NcWDBRv4E6+YFG2pUdw1Lk= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.12 h1:tkVNm99nkJnFo1H9IIQb5QkCiPcvCDn3Pos+IeTbGRA= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.12/go.mod h1:dIVlquSPUMqEJtx2/W17SM2SuESRaVEhEV9alcMqxjw= -github.com/aws/aws-sdk-go-v2/service/s3 v1.75.2 h1:dyC+iA2+Yc7iDMDh0R4eT6fi8TgBduc+BOWCy6Br0/o= -github.com/aws/aws-sdk-go-v2/service/s3 v1.75.2/go.mod h1:FHSHmyEUkzRbaFFqqm6bkLAOQHgqhsLmfCahvCBMiyA= +github.com/aws/aws-sdk-go-v2/service/s3 v1.75.3 h1:JBod0SnNqcWQ0+uAyzeRFG1zCHotW8DukumYYyNy0zo= +github.com/aws/aws-sdk-go-v2/service/s3 v1.75.3/go.mod h1:FHSHmyEUkzRbaFFqqm6bkLAOQHgqhsLmfCahvCBMiyA= github.com/aws/aws-sdk-go-v2/service/sso v1.24.14 h1:c5WJ3iHz7rLIgArznb3JCSQT3uUMiz9DLZhIX+1G8ok= github.com/aws/aws-sdk-go-v2/service/sso v1.24.14/go.mod h1:+JJQTxB6N4niArC14YNtxcQtwEqzS3o9Z32n7q33Rfs= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.13 h1:f1L/JtUkVODD+k1+IiSJUUv8A++2qVr+Xvb3xWXETMU= From bebd1532b7cbd02afb6396a89c981c45d46b05f3 Mon Sep 17 00:00:00 2001 From: Christian Haudum Date: Wed, 5 Feb 2025 08:26:17 +0100 Subject: [PATCH 24/33] ci: Fix `gochecksumtype` linter error (#16096) Signed-off-by: Christian Haudum --- pkg/logql/syntax/walk_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/logql/syntax/walk_test.go b/pkg/logql/syntax/walk_test.go 
index 9f0a5015ed731..57cfdeb39c32b 100644 --- a/pkg/logql/syntax/walk_test.go +++ b/pkg/logql/syntax/walk_test.go @@ -79,6 +79,8 @@ func Test_AppendMatchers(t *testing.T) { switch me := e.(type) { case *MatchersExpr: me.AppendMatchers(test.matchers) + default: + // do nothing } }) require.Equal(t, test.want, expr.String()) From 9a36eca5e08d68262cddf2ce1a5866d88b61ec8a Mon Sep 17 00:00:00 2001 From: Trevor Whitney Date: Wed, 5 Feb 2025 01:35:58 -0700 Subject: [PATCH 25/33] ci: fix docker driver platforms (fixes k239 release) (#16095) We only publish amd64 and arm64 versions of the docker driver, and only have a build image for those platforms, so this removes linux/arm as a target. Co-authored-by: Christian Haudum --- .github/release-workflows.jsonnet | 2 +- .github/workflows/minor-release-pr.yml | 3 --- .github/workflows/patch-release-pr.yml | 3 --- 3 files changed, 1 insertion(+), 7 deletions(-) diff --git a/.github/release-workflows.jsonnet b/.github/release-workflows.jsonnet index c0f4284673210..fd6f22f86bfde 100644 --- a/.github/release-workflows.jsonnet +++ b/.github/release-workflows.jsonnet @@ -30,7 +30,7 @@ local imageJobs = { 'loki-canary-boringcrypto': build.image('loki-canary-boringcrypto', 'cmd/loki-canary-boringcrypto', platform=platforms.all), promtail: build.image('promtail', 'clients/cmd/promtail', platform=platforms.all), querytee: build.image('loki-query-tee', 'cmd/querytee', platform=platforms.amd), - 'loki-docker-driver': build.dockerPlugin('loki-docker-driver', dockerPluginDir, buildImage=buildImage, platform=platforms.all), + 'loki-docker-driver': build.dockerPlugin('loki-docker-driver', dockerPluginDir, buildImage=buildImage, platform=[r.forPlatform('linux/amd64'), r.forPlatform('linux/arm64')]), }; local weeklyImageJobs = { diff --git a/.github/workflows/minor-release-pr.yml b/.github/workflows/minor-release-pr.yml index 7ce894025d039..80d0bccefedb7 100644 --- a/.github/workflows/minor-release-pr.yml +++ b/.github/workflows/minor-release-pr.yml @@ -696,9 +696,6 @@ jobs: - arch: "linux/arm64" runs_on: - "github-hosted-ubuntu-arm64-small" - - arch: "linux/arm" - runs_on: - - "github-hosted-ubuntu-arm64-small" promtail: needs: - "version" diff --git a/.github/workflows/patch-release-pr.yml b/.github/workflows/patch-release-pr.yml index 8f2d1f1e302ad..cc601fc88d044 100644 --- a/.github/workflows/patch-release-pr.yml +++ b/.github/workflows/patch-release-pr.yml @@ -696,9 +696,6 @@ jobs: - arch: "linux/arm64" runs_on: - "github-hosted-ubuntu-arm64-small" - - arch: "linux/arm" - runs_on: - - "github-hosted-ubuntu-arm64-small" promtail: needs: - "version" From 1187a73667229f2a6cda6aee2a1c023084617f9e Mon Sep 17 00:00:00 2001 From: Christian Haudum Date: Wed, 5 Feb 2025 09:47:27 +0100 Subject: [PATCH 26/33] chore: LogQL syntax and AST simplification and consistency (#16028) This PR aims to make the grammar file (syntax.y) for the LogQL syntax more readable and the AST and its expressions more consistent in naming. You could say these are cosmetic changes, and yes they are, but if they help developers understand the code better and faster, it's a win. Personally, I found the grammar yacc file intimidating until I understood that it was way more complicated than it needed to be. I hope it makes future additions to the syntax easier to implement. 
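For illustration only (this sketch is not part of the diff below, and the label names in it are made up), the consolidated naming means both the drop and keep stages are now built from the same log.NamedLabelMatcher type rather than the former separate DropLabel and KeepLabel types, using the constructors introduced in this patch:

    package main

    import (
        "github.com/prometheus/prometheus/model/labels"

        "github.com/grafana/loki/v3/pkg/logql/log"
    )

    func main() {
        // Illustrative example only; label names are arbitrary.
        // Drop any "level" label whose value is "debug" (matcher-based rule).
        m := labels.MustNewMatcher(labels.MatchEqual, "level", "debug")
        drop := log.NewDropLabels([]log.NamedLabelMatcher{log.NewNamedLabelMatcher(m, "")})

        // Keep only the "app" label (name-based rule, no matcher).
        keep := log.NewKeepLabels([]log.NamedLabelMatcher{log.NewNamedLabelMatcher(nil, "app")})

        _, _ = drop, keep
    }
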
Signed-off-by: Christian Haudum --- Makefile | 4 +- pkg/logql/downstream_test.go | 6 +- pkg/logql/engine.go | 2 +- pkg/logql/log/drop_labels.go | 24 +- pkg/logql/log/drop_labels_test.go | 12 +- pkg/logql/log/keep_labels.go | 24 +- pkg/logql/log/keep_labels_test.go | 10 +- pkg/logql/log/pipeline_test.go | 10 +- pkg/logql/optimize.go | 6 +- pkg/logql/range_vector_test.go | 2 +- pkg/logql/rangemapper.go | 2 +- pkg/logql/shardmapper_test.go | 88 +- pkg/logql/syntax/ast.go | 297 ++- pkg/logql/syntax/ast_test.go | 8 +- pkg/logql/syntax/clone.go | 26 +- pkg/logql/syntax/expr.y.go | 2046 ------------------- pkg/logql/syntax/lex.go | 10 +- pkg/logql/syntax/lex_test.go | 12 +- pkg/logql/syntax/parser.go | 8 +- pkg/logql/syntax/parser_test.go | 120 +- pkg/logql/syntax/prettier.go | 8 +- pkg/logql/syntax/serialize.go | 28 +- pkg/logql/syntax/{expr.y => syntax.y} | 275 +-- pkg/logql/syntax/syntax.y.go | 1989 ++++++++++++++++++ pkg/logql/syntax/test_utils.go | 2 +- pkg/logql/syntax/visit.go | 24 +- pkg/logql/syntax/visit_test.go | 4 +- pkg/querier/queryrange/split_by_interval.go | 2 +- 28 files changed, 2444 insertions(+), 2605 deletions(-) delete mode 100644 pkg/logql/syntax/expr.y.go rename pkg/logql/syntax/{expr.y => syntax.y} (72%) create mode 100644 pkg/logql/syntax/syntax.y.go diff --git a/Makefile b/Makefile index 3ead3375225cd..4f836262e7bfa 100644 --- a/Makefile +++ b/Makefile @@ -424,9 +424,7 @@ yacc: $(YACC_GOS) ifeq ($(BUILD_IN_CONTAINER),true) $(run_in_container) else - goyacc -p $(basename $(notdir $<)) -o $@ $< - sed -i.back '/^\/\/line/ d' $@ - rm ${@}.back + goyacc -l -p $(basename $(notdir $<)) -o $@ $< endif ######### diff --git a/pkg/logql/downstream_test.go b/pkg/logql/downstream_test.go index b3c0e1ede7f15..df37b3114e8e4 100644 --- a/pkg/logql/downstream_test.go +++ b/pkg/logql/downstream_test.go @@ -838,7 +838,7 @@ func TestFormat_ShardedExpr(t *testing.T) { }).Bind(nil), SampleExpr: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeRate, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -854,7 +854,7 @@ func TestFormat_ShardedExpr(t *testing.T) { }).Bind(nil), SampleExpr: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeRate, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -870,7 +870,7 @@ func TestFormat_ShardedExpr(t *testing.T) { }).Bind(nil), SampleExpr: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeRate, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, diff --git a/pkg/logql/engine.go b/pkg/logql/engine.go index c3561e75184fa..a92a747efec04 100644 --- a/pkg/logql/engine.go +++ b/pkg/logql/engine.go @@ -489,7 +489,7 @@ func (q *query) checkIntervalLimit(expr syntax.SampleExpr, limit time.Duration) var err error expr.Walk(func(e syntax.Expr) { switch e := e.(type) { - case *syntax.LogRange: + case *syntax.LogRangeExpr: if e.Interval > limit { err = fmt.Errorf("%w: [%s] > [%s]", logqlmodel.ErrIntervalLimit, model.Duration(e.Interval), model.Duration(limit)) } diff --git a/pkg/logql/log/drop_labels.go b/pkg/logql/log/drop_labels.go index 0f0fcdee942f0..ff22238f4591f 100644 --- a/pkg/logql/log/drop_labels.go +++ b/pkg/logql/log/drop_labels.go @@ -6,28 +6,28 @@ import ( "github.com/grafana/loki/v3/pkg/logqlmodel" ) 
-type DropLabels struct { - dropLabels []DropLabel -} - -type DropLabel struct { +type NamedLabelMatcher struct { Matcher *labels.Matcher Name string } -func NewDropLabel(matcher *labels.Matcher, name string) DropLabel { - return DropLabel{ - Matcher: matcher, - Name: name, +func NewNamedLabelMatcher(m *labels.Matcher, n string) NamedLabelMatcher { + return NamedLabelMatcher{ + Matcher: m, + Name: n, } } -func NewDropLabels(dl []DropLabel) *DropLabels { - return &DropLabels{dropLabels: dl} +type DropLabels struct { + labels []NamedLabelMatcher +} + +func NewDropLabels(labels []NamedLabelMatcher) *DropLabels { + return &DropLabels{labels: labels} } func (dl *DropLabels) Process(_ int64, line []byte, lbls *LabelsBuilder) ([]byte, bool) { - for _, dropLabel := range dl.dropLabels { + for _, dropLabel := range dl.labels { if dropLabel.Matcher != nil { dropLabelMatches(dropLabel.Matcher, lbls) continue diff --git a/pkg/logql/log/drop_labels_test.go b/pkg/logql/log/drop_labels_test.go index bce8487fd93a7..69ca29741d9b2 100644 --- a/pkg/logql/log/drop_labels_test.go +++ b/pkg/logql/log/drop_labels_test.go @@ -12,7 +12,7 @@ import ( func Test_DropLabels(t *testing.T) { tests := []struct { Name string - dropLabels []DropLabel + dropLabels []NamedLabelMatcher err string errDetails string lbs labels.Labels @@ -20,7 +20,7 @@ func Test_DropLabels(t *testing.T) { }{ { "drop by name", - []DropLabel{ + []NamedLabelMatcher{ { nil, "app", @@ -40,7 +40,7 @@ func Test_DropLabels(t *testing.T) { }, { "drop by __error__", - []DropLabel{ + []NamedLabelMatcher{ { labels.MustNewMatcher(labels.MatchEqual, logqlmodel.ErrorLabel, errJSON), "", @@ -63,7 +63,7 @@ func Test_DropLabels(t *testing.T) { }, { "drop with wrong __error__ value", - []DropLabel{ + []NamedLabelMatcher{ { labels.MustNewMatcher(labels.MatchEqual, logqlmodel.ErrorLabel, errLogfmt), "", @@ -84,7 +84,7 @@ func Test_DropLabels(t *testing.T) { }, { "drop by __error_details__", - []DropLabel{ + []NamedLabelMatcher{ { labels.MustNewMatcher(labels.MatchRegexp, logqlmodel.ErrorDetailsLabel, "expecting json.*"), "", @@ -107,7 +107,7 @@ func Test_DropLabels(t *testing.T) { }, { "drop labels with names and matcher", - []DropLabel{ + []NamedLabelMatcher{ { labels.MustNewMatcher(labels.MatchEqual, logqlmodel.ErrorLabel, errJSON), "", diff --git a/pkg/logql/log/keep_labels.go b/pkg/logql/log/keep_labels.go index 67c93ecca8fd2..6f44efeae1550 100644 --- a/pkg/logql/log/keep_labels.go +++ b/pkg/logql/log/keep_labels.go @@ -1,33 +1,19 @@ package log import ( - "github.com/prometheus/prometheus/model/labels" - "github.com/grafana/loki/v3/pkg/logqlmodel" ) type KeepLabels struct { - keepLabels []KeepLabel -} - -type KeepLabel struct { - Matcher *labels.Matcher - Name string -} - -func NewKeepLabel(matcher *labels.Matcher, name string) KeepLabel { - return KeepLabel{ - Matcher: matcher, - Name: name, - } + labels []NamedLabelMatcher } -func NewKeepLabels(kl []KeepLabel) *KeepLabels { - return &KeepLabels{keepLabels: kl} +func NewKeepLabels(labels []NamedLabelMatcher) *KeepLabels { + return &KeepLabels{labels: labels} } func (kl *KeepLabels) Process(_ int64, line []byte, lbls *LabelsBuilder) ([]byte, bool) { - if len(kl.keepLabels) == 0 { + if len(kl.labels) == 0 { return line, true } @@ -38,7 +24,7 @@ func (kl *KeepLabels) Process(_ int64, line []byte, lbls *LabelsBuilder) ([]byte } var keep bool - for _, keepLabel := range kl.keepLabels { + for _, keepLabel := range kl.labels { if keepLabel.Matcher != nil && keepLabel.Matcher.Name == lb.Name && 
keepLabel.Matcher.Matches(lb.Value) { keep = true break diff --git a/pkg/logql/log/keep_labels_test.go b/pkg/logql/log/keep_labels_test.go index 11d70f0ac6549..ec01c69227573 100644 --- a/pkg/logql/log/keep_labels_test.go +++ b/pkg/logql/log/keep_labels_test.go @@ -12,14 +12,14 @@ import ( func Test_KeepLabels(t *testing.T) { for _, tc := range []struct { Name string - keepLabels []KeepLabel + keepLabels []NamedLabelMatcher lbs labels.Labels want labels.Labels }{ { "keep all", - []KeepLabel{}, + []NamedLabelMatcher{}, labels.FromStrings( "app", "foo", "namespace", "prod", @@ -35,7 +35,7 @@ func Test_KeepLabels(t *testing.T) { }, { "keep by name", - []KeepLabel{ + []NamedLabelMatcher{ { nil, "app", @@ -58,7 +58,7 @@ func Test_KeepLabels(t *testing.T) { }, { "keep labels with names and matcher", - []KeepLabel{ + []NamedLabelMatcher{ { labels.MustNewMatcher(labels.MatchEqual, "namespace", "prod"), "", @@ -85,7 +85,7 @@ func Test_KeepLabels(t *testing.T) { }, { "preserve special labels", - []KeepLabel{ + []NamedLabelMatcher{ { labels.MustNewMatcher(labels.MatchEqual, "namespace", "prod"), "", diff --git a/pkg/logql/log/pipeline_test.go b/pkg/logql/log/pipeline_test.go index 8c11d0c198a10..7ef4653a912e5 100644 --- a/pkg/logql/log/pipeline_test.go +++ b/pkg/logql/log/pipeline_test.go @@ -328,7 +328,7 @@ func TestDropLabelsPipeline(t *testing.T) { []Stage{ NewLogfmtParser(true, false), NewJSONParser(), - NewDropLabels([]DropLabel{ + NewDropLabels([]NamedLabelMatcher{ { nil, "__error__", @@ -365,7 +365,7 @@ func TestDropLabelsPipeline(t *testing.T) { []Stage{ NewLogfmtParser(true, false), NewJSONParser(), - NewDropLabels([]DropLabel{ + NewDropLabels([]NamedLabelMatcher{ { labels.MustNewMatcher(labels.MatchEqual, logqlmodel.ErrorLabel, errLogfmt), "", @@ -431,7 +431,7 @@ func TestKeepLabelsPipeline(t *testing.T) { name: "keep all", stages: []Stage{ NewLogfmtParser(false, false), - NewKeepLabels([]KeepLabel{}), + NewKeepLabels([]NamedLabelMatcher{}), }, lines: [][]byte{ []byte(`level=info ts=2020-10-18T18:04:22.147378997Z caller=metrics.go:81 status=200`), @@ -467,7 +467,7 @@ func TestKeepLabelsPipeline(t *testing.T) { name: "keep by name", stages: []Stage{ NewLogfmtParser(false, false), - NewKeepLabels([]KeepLabel{ + NewKeepLabels([]NamedLabelMatcher{ { nil, "level", @@ -498,7 +498,7 @@ func TestKeepLabelsPipeline(t *testing.T) { name: "keep by matcher", stages: []Stage{ NewLogfmtParser(false, false), - NewKeepLabels([]KeepLabel{ + NewKeepLabels([]NamedLabelMatcher{ { labels.MustNewMatcher(labels.MatchEqual, "level", "info"), "", diff --git a/pkg/logql/optimize.go b/pkg/logql/optimize.go index 53f1bc94ba397..ffa0996cdcc9e 100644 --- a/pkg/logql/optimize.go +++ b/pkg/logql/optimize.go @@ -55,7 +55,7 @@ func removeLineformat(expr syntax.SampleExpr) { found = true break } - if _, ok := pipelineExpr.MultiStages[j].(*syntax.LabelParserExpr); ok { + if _, ok := pipelineExpr.MultiStages[j].(*syntax.LineParserExpr); ok { found = true break } @@ -63,11 +63,11 @@ func removeLineformat(expr syntax.SampleExpr) { found = true break } - if _, ok := pipelineExpr.MultiStages[j].(*syntax.JSONExpressionParser); ok { + if _, ok := pipelineExpr.MultiStages[j].(*syntax.JSONExpressionParserExpr); ok { found = true break } - if _, ok := pipelineExpr.MultiStages[j].(*syntax.LogfmtExpressionParser); ok { + if _, ok := pipelineExpr.MultiStages[j].(*syntax.LogfmtExpressionParserExpr); ok { found = true break } diff --git a/pkg/logql/range_vector_test.go b/pkg/logql/range_vector_test.go index f0ee5454b6334..58a978fff6ad0 
100644 --- a/pkg/logql/range_vector_test.go +++ b/pkg/logql/range_vector_test.go @@ -541,7 +541,7 @@ func Test_InstantQueryRangeVectorAggregations(t *testing.T) { for _, tt := range tests { t.Run(fmt.Sprintf("testing aggregation %s", tt.name), func(t *testing.T) { it, err := newRangeVectorIterator(sampleIter(tt.negative), - &syntax.RangeAggregationExpr{Left: &syntax.LogRange{Interval: 2}, Params: proto.Float64(0.99), Operation: tt.op}, + &syntax.RangeAggregationExpr{Left: &syntax.LogRangeExpr{Interval: 2}, Params: proto.Float64(0.99), Operation: tt.op}, 3, 1, start, end, 0) require.NoError(t, err) diff --git a/pkg/logql/rangemapper.go b/pkg/logql/rangemapper.go index bec1711226109..f33d50b0c5d92 100644 --- a/pkg/logql/rangemapper.go +++ b/pkg/logql/rangemapper.go @@ -208,7 +208,7 @@ func hasLabelExtractionStage(expr syntax.SampleExpr) bool { switch concrete := e.(type) { case *syntax.LogfmtParserExpr: found = true - case *syntax.LabelParserExpr: + case *syntax.LineParserExpr: // It will **not** return true for `regexp`, `unpack` and `pattern`, since these label extraction // stages can control how many labels, and therefore the resulting amount of series, are extracted. if concrete.Op == syntax.OpParserTypeJSON { diff --git a/pkg/logql/shardmapper_test.go b/pkg/logql/shardmapper_test.go index b1842c094896c..652fa26c69430 100644 --- a/pkg/logql/shardmapper_test.go +++ b/pkg/logql/shardmapper_test.go @@ -62,7 +62,7 @@ func TestMapSampleExpr(t *testing.T) { { in: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeRate, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -77,7 +77,7 @@ func TestMapSampleExpr(t *testing.T) { }).Bind(nil), SampleExpr: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeRate, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -93,7 +93,7 @@ func TestMapSampleExpr(t *testing.T) { }).Bind(nil), SampleExpr: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeRate, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -624,7 +624,7 @@ func TestMapping(t *testing.T) { }).Bind(nil), SampleExpr: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeRate, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -640,7 +640,7 @@ func TestMapping(t *testing.T) { }).Bind(nil), SampleExpr: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeRate, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -662,7 +662,7 @@ func TestMapping(t *testing.T) { }).Bind(nil), SampleExpr: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeCount, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -678,7 +678,7 @@ func TestMapping(t *testing.T) { }).Bind(nil), SampleExpr: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeCount, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -706,7 
+706,7 @@ func TestMapping(t *testing.T) { Operation: syntax.OpTypeSum, Left: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeRate, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -726,7 +726,7 @@ func TestMapping(t *testing.T) { Operation: syntax.OpTypeSum, Left: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeRate, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -754,7 +754,7 @@ func TestMapping(t *testing.T) { }).Bind(nil), SampleExpr: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeRate, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -770,7 +770,7 @@ func TestMapping(t *testing.T) { }).Bind(nil), SampleExpr: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeRate, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -800,7 +800,7 @@ func TestMapping(t *testing.T) { Operation: syntax.OpTypeCountMinSketch, Left: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeRate, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -819,7 +819,7 @@ func TestMapping(t *testing.T) { Operation: syntax.OpTypeCountMinSketch, Left: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeRate, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -849,7 +849,7 @@ func TestMapping(t *testing.T) { Operation: syntax.OpTypeCount, Left: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeRate, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -869,7 +869,7 @@ func TestMapping(t *testing.T) { Operation: syntax.OpTypeCount, Left: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeRate, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -901,7 +901,7 @@ func TestMapping(t *testing.T) { Operation: syntax.OpTypeSum, Left: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeRate, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -921,7 +921,7 @@ func TestMapping(t *testing.T) { Operation: syntax.OpTypeSum, Left: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeRate, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -948,7 +948,7 @@ func TestMapping(t *testing.T) { Operation: syntax.OpTypeCount, Left: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeRate, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -968,7 +968,7 @@ func TestMapping(t *testing.T) { Operation: syntax.OpTypeCount, Left: 
&syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeRate, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -1010,7 +1010,7 @@ func TestMapping(t *testing.T) { Operation: syntax.OpTypeSum, Left: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeRate, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -1032,7 +1032,7 @@ func TestMapping(t *testing.T) { Operation: syntax.OpTypeSum, Left: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeRate, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -1070,7 +1070,7 @@ func TestMapping(t *testing.T) { Operation: syntax.OpTypeSum, Left: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeRate, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -1090,7 +1090,7 @@ func TestMapping(t *testing.T) { Operation: syntax.OpTypeSum, Left: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeRate, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -1125,7 +1125,7 @@ func TestMapping(t *testing.T) { Operation: syntax.OpTypeCount, Left: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeRate, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -1145,7 +1145,7 @@ func TestMapping(t *testing.T) { Operation: syntax.OpTypeCount, Left: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeRate, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -1189,7 +1189,7 @@ func TestMapping(t *testing.T) { Operation: syntax.OpTypeSum, Left: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeRate, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -1211,7 +1211,7 @@ func TestMapping(t *testing.T) { Operation: syntax.OpTypeSum, Left: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeRate, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -1239,7 +1239,7 @@ func TestMapping(t *testing.T) { Operation: syntax.OpTypeCount, Left: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeRate, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -1259,7 +1259,7 @@ func TestMapping(t *testing.T) { Operation: syntax.OpTypeCount, Left: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeRate, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -1303,7 +1303,7 @@ func TestMapping(t *testing.T) { Operation: syntax.OpTypeSum, Left: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeRate, - Left: 
&syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -1325,7 +1325,7 @@ func TestMapping(t *testing.T) { Operation: syntax.OpTypeSum, Left: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeRate, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -1352,7 +1352,7 @@ func TestMapping(t *testing.T) { Operation: syntax.OpTypeCount, Left: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeRate, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -1372,7 +1372,7 @@ func TestMapping(t *testing.T) { Operation: syntax.OpTypeCount, Left: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeRate, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -1409,7 +1409,7 @@ func TestMapping(t *testing.T) { Operation: syntax.OpTypeSum, Left: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeSum, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -1434,7 +1434,7 @@ func TestMapping(t *testing.T) { Operation: syntax.OpTypeSum, Left: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeSum, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -1468,7 +1468,7 @@ func TestMapping(t *testing.T) { Operation: syntax.OpTypeSum, Left: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeCount, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -1490,7 +1490,7 @@ func TestMapping(t *testing.T) { Operation: syntax.OpTypeSum, Left: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeCount, - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -1510,7 +1510,7 @@ func TestMapping(t *testing.T) { expr: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeQuantile, Params: float64p(0.8), - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -1526,7 +1526,7 @@ func TestMapping(t *testing.T) { expr: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeQuantile, Params: float64p(0.8), - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}, }, @@ -1551,7 +1551,7 @@ func TestMapping(t *testing.T) { SampleExpr: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeQuantile, Params: float64p(0.99), - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "a", "foo")}, }, @@ -1574,7 +1574,7 @@ func TestMapping(t *testing.T) { }).Bind(nil), SampleExpr: &syntax.VectorAggregationExpr{ Left: &syntax.RangeAggregationExpr{ - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: 
[]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "a", "bar")}, }, @@ -1597,7 +1597,7 @@ func TestMapping(t *testing.T) { }).Bind(nil), SampleExpr: &syntax.VectorAggregationExpr{ Left: &syntax.RangeAggregationExpr{ - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "a", "bar")}, }, @@ -1634,7 +1634,7 @@ func TestMapping(t *testing.T) { SampleExpr: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeQuantile, Params: float64p(0.99), - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "a", "foo")}, }, @@ -1668,7 +1668,7 @@ func TestMapping(t *testing.T) { SampleExpr: &syntax.RangeAggregationExpr{ Operation: syntax.OpRangeTypeQuantile, Params: float64p(0.99), - Left: &syntax.LogRange{ + Left: &syntax.LogRangeExpr{ Left: &syntax.MatchersExpr{ Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "a", "foo")}, }, diff --git a/pkg/logql/syntax/ast.go b/pkg/logql/syntax/ast.go index 33959098d8e24..3335f2eae59f2 100644 --- a/pkg/logql/syntax/ast.go +++ b/pkg/logql/syntax/ast.go @@ -21,20 +21,107 @@ import ( "github.com/grafana/loki/v3/pkg/logqlmodel" ) +// Type alias for backward compatibility +type ( + Pipeline = log.Pipeline + SampleExtractor = log.SampleExtractor +) + // Expr is the root expression which can be a SampleExpr or LogSelectorExpr -// -//sumtype:decl type Expr interface { - logQLExpr() // ensure it's not implemented accidentally - Shardable(topLevel bool) bool // A recursive check on the AST to see if it's shardable. + // Shardable performs a recursive check on the expression and its children to see whether it is shardable. + Shardable(topLevel bool) bool + // Accepts a walk function to recursively visit the AST + // Works on expressions that are represented in the LogQL syntax as well as expressions from the "execution plan". Walkable - fmt.Stringer + // Accepts a visitor to recursively visit the AST. + // Only works on expressions that are represented in the LogQL syntax. AcceptVisitor - - // Pretty prettyfies any LogQL expression at given `level` of the whole LogQL query. + // Returns the string representation of the expression. + fmt.Stringer + // Pretty returns a nicely formatted string representation of the expression at given level. Pretty(level int) string + // isExpr ensures that the interface is not implemented accidentally. + isExpr() +} + +func (MatchersExpr) isExpr() {} +func (PipelineExpr) isExpr() {} +func (RangeAggregationExpr) isExpr() {} +func (VectorAggregationExpr) isExpr() {} +func (LiteralExpr) isExpr() {} +func (VectorExpr) isExpr() {} +func (LabelReplaceExpr) isExpr() {} +func (LineParserExpr) isExpr() {} +func (LogfmtParserExpr) isExpr() {} +func (LineFilterExpr) isExpr() {} +func (LabelFilterExpr) isExpr() {} +func (DecolorizeExpr) isExpr() {} +func (DropLabelsExpr) isExpr() {} +func (KeepLabelsExpr) isExpr() {} +func (LineFmtExpr) isExpr() {} +func (LabelFmtExpr) isExpr() {} +func (JSONExpressionParserExpr) isExpr() {} +func (LogfmtExpressionParserExpr) isExpr() {} +func (LogRangeExpr) isExpr() {} +func (OffsetExpr) isExpr() {} +func (UnwrapExpr) isExpr() {} + +// LogSelectorExpr is a expression filtering and returning logs. +type LogSelectorExpr interface { + Matchers() []*labels.Matcher + Pipeline() (Pipeline, error) + HasFilter() bool + + Expr + // isLogSelectorExpr ensures that the interface is not implemented accidentally. 
+ isLogSelectorExpr() +} + +func (MatchersExpr) isLogSelectorExpr() {} +func (PipelineExpr) isLogSelectorExpr() {} +func (MultiStageExpr) isLogSelectorExpr() {} +func (LiteralExpr) isLogSelectorExpr() {} +func (VectorExpr) isLogSelectorExpr() {} + +// SampleExpr is a LogQL expression filtering logs and returning metric samples +type SampleExpr interface { + Selector() (LogSelectorExpr, error) + Extractor() (SampleExtractor, error) + MatcherGroups() ([]MatcherRange, error) + + Expr + // isSampleExpr ensures that the interface is not implemented accidentally. + isSampleExpr() +} + +func (RangeAggregationExpr) isSampleExpr() {} +func (VectorAggregationExpr) isSampleExpr() {} +func (LiteralExpr) isSampleExpr() {} +func (VectorExpr) isSampleExpr() {} +func (LabelReplaceExpr) isSampleExpr() {} + +// StageExpr is an expression defining a single step into a log pipeline +type StageExpr interface { + Stage() (log.Stage, error) + + Expr + // isStageExpr ensures that the interface is not implemented accidentally. + isStageExpr() } +func (LineParserExpr) isStageExpr() {} +func (LogfmtParserExpr) isStageExpr() {} +func (LineFilterExpr) isStageExpr() {} +func (LabelFilterExpr) isStageExpr() {} +func (DecolorizeExpr) isStageExpr() {} +func (DropLabelsExpr) isStageExpr() {} +func (KeepLabelsExpr) isStageExpr() {} +func (LineFmtExpr) isStageExpr() {} +func (LabelFmtExpr) isStageExpr() {} +func (JSONExpressionParserExpr) isStageExpr() {} +func (LogfmtExpressionParserExpr) isStageExpr() {} + func Clone[T Expr](e T) (T, error) { var empty T v := &cloneVisitor{} @@ -95,9 +182,9 @@ func ExtractLabelFiltersBeforeParser(e Expr) []*LabelFilterExpr { // misbehave. VisitLogfmtParserFn: func(_ RootVisitor, _ *LogfmtParserExpr) { foundParseStage = true }, - VisitLabelParserFn: func(_ RootVisitor, _ *LabelParserExpr) { foundParseStage = true }, - VisitJSONExpressionParserFn: func(_ RootVisitor, _ *JSONExpressionParser) { foundParseStage = true }, - VisitLogfmtExpressionParserFn: func(_ RootVisitor, _ *LogfmtExpressionParser) { foundParseStage = true }, + VisitLabelParserFn: func(_ RootVisitor, _ *LineParserExpr) { foundParseStage = true }, + VisitJSONExpressionParserFn: func(_ RootVisitor, _ *JSONExpressionParserExpr) { foundParseStage = true }, + VisitLogfmtExpressionParserFn: func(_ RootVisitor, _ *LogfmtExpressionParserExpr) { foundParseStage = true }, VisitLabelFmtFn: func(_ RootVisitor, _ *LabelFmtExpr) { foundParseStage = true }, VisitKeepLabelFn: func(_ RootVisitor, _ *KeepLabelsExpr) { foundParseStage = true }, VisitDropLabelsFn: func(_ RootVisitor, _ *DropLabelsExpr) { foundParseStage = true }, @@ -119,39 +206,6 @@ func IsMatchEqualFilterer(filterer log.LabelFilterer) bool { } } -// implicit holds default implementations -type implicit struct{} - -func (implicit) logQLExpr() {} - -// LogSelectorExpr is a LogQL expression filtering and returning logs. -// -//sumtype:decl -type LogSelectorExpr interface { - Matchers() []*labels.Matcher - Pipeline() (Pipeline, error) - HasFilter() bool - Expr - - isLogSelectorExpr() -} - -// Type alias for backward compatibility -type ( - Pipeline = log.Pipeline - SampleExtractor = log.SampleExtractor -) - -// StageExpr is an expression defining a single step into a log pipeline -// -//sumtype:decl -type StageExpr interface { - Stage() (log.Stage, error) - Expr - - isStageExpr() -} - // MultiStageExpr is multiple stages which implements a LogSelectorExpr. 
type MultiStageExpr []StageExpr @@ -213,7 +267,7 @@ func (m MultiStageExpr) reorderStages() []StageExpr { notLineFilters = append(notLineFilters, f) combineFilters() - case *LabelParserExpr: + case *LineParserExpr: notLineFilters = append(notLineFilters, f) // unpack modifies the contents of the line so any line filter @@ -270,15 +324,12 @@ func (MultiStageExpr) logQLExpr() {} // nolint:unused type MatchersExpr struct { Mts []*labels.Matcher - implicit } func newMatcherExpr(matchers []*labels.Matcher) *MatchersExpr { return &MatchersExpr{Mts: matchers} } -func (e *MatchersExpr) isLogSelectorExpr() {} - func (e *MatchersExpr) Matchers() []*labels.Matcher { return e.Mts } @@ -317,7 +368,6 @@ func (e *MatchersExpr) HasFilter() bool { type PipelineExpr struct { MultiStages MultiStageExpr Left *MatchersExpr - implicit } func newPipelineExpr(left *MatchersExpr, pipeline MultiStageExpr) LogSelectorExpr { @@ -327,8 +377,6 @@ func newPipelineExpr(left *MatchersExpr, pipeline MultiStageExpr) LogSelectorExp } } -func (e *PipelineExpr) isLogSelectorExpr() {} - func (e *PipelineExpr) Shardable(topLevel bool) bool { for _, p := range e.MultiStages { if !p.Shardable(topLevel) { @@ -407,7 +455,6 @@ type LineFilterExpr struct { // See LineFilterExpr tests for more examples. Or *LineFilterExpr IsOrChild bool - implicit } func newLineFilterExpr(ty log.LineMatchType, op, match string) *LineFilterExpr { @@ -420,7 +467,7 @@ func newLineFilterExpr(ty log.LineMatchType, op, match string) *LineFilterExpr { } } -func newOrLineFilter(left, right *LineFilterExpr) *LineFilterExpr { +func newOrLineFilterExpr(left, right *LineFilterExpr) *LineFilterExpr { right.Ty = left.Ty // NOTE: Consider, we have chain of "or", != "foo" or "bar" or "baz" @@ -493,8 +540,6 @@ func newNestedLineFilterExpr(left *LineFilterExpr, right *LineFilterExpr) *LineF } } -func (*LineFilterExpr) isStageExpr() {} - func (e *LineFilterExpr) Walk(f WalkFn) { f(e) if e.Left != nil { @@ -626,8 +671,6 @@ func (e *LineFilterExpr) Stage() (log.Stage, error) { type LogfmtParserExpr struct { Strict bool KeepEmpty bool - - implicit } func newLogfmtParserExpr(flags []string) *LogfmtParserExpr { @@ -644,8 +687,6 @@ func newLogfmtParserExpr(flags []string) *LogfmtParserExpr { return &e } -func (*LogfmtParserExpr) isStageExpr() {} - func (e *LogfmtParserExpr) Shardable(_ bool) bool { return true } func (e *LogfmtParserExpr) Walk(f WalkFn) { f(e) } @@ -675,13 +716,12 @@ func (e *LogfmtParserExpr) String() string { return sb.String() } -type LabelParserExpr struct { +type LineParserExpr struct { Op string Param string - implicit } -func newLabelParserExpr(op, param string) *LabelParserExpr { +func newLabelParserExpr(op, param string) *LineParserExpr { if op == OpParserTypeRegexp { _, err := log.NewRegexpParser(param) if err != nil { @@ -695,21 +735,19 @@ func newLabelParserExpr(op, param string) *LabelParserExpr { } } - return &LabelParserExpr{ + return &LineParserExpr{ Op: op, Param: param, } } -func (*LabelParserExpr) isStageExpr() {} +func (e *LineParserExpr) Shardable(_ bool) bool { return true } -func (e *LabelParserExpr) Shardable(_ bool) bool { return true } +func (e *LineParserExpr) Walk(f WalkFn) { f(e) } -func (e *LabelParserExpr) Walk(f WalkFn) { f(e) } +func (e *LineParserExpr) Accept(v RootVisitor) { v.VisitLabelParser(e) } -func (e *LabelParserExpr) Accept(v RootVisitor) { v.VisitLabelParser(e) } - -func (e *LabelParserExpr) Stage() (log.Stage, error) { +func (e *LineParserExpr) Stage() (log.Stage, error) { switch e.Op { case OpParserTypeJSON: return 
log.NewJSONParser(), nil @@ -724,7 +762,7 @@ func (e *LabelParserExpr) Stage() (log.Stage, error) { } } -func (e *LabelParserExpr) String() string { +func (e *LineParserExpr) String() string { var sb strings.Builder sb.WriteString(OpPipe) sb.WriteString(" ") @@ -741,7 +779,6 @@ func (e *LabelParserExpr) String() string { type LabelFilterExpr struct { log.LabelFilterer - implicit } func newLabelFilterExpr(filterer log.LabelFilterer) *LabelFilterExpr { @@ -750,8 +787,6 @@ func newLabelFilterExpr(filterer log.LabelFilterer) *LabelFilterExpr { } } -func (*LabelFilterExpr) isStageExpr() {} - func (e *LabelFilterExpr) Shardable(_ bool) bool { return true } func (e *LabelFilterExpr) Walk(f WalkFn) { f(e) } @@ -774,7 +809,6 @@ func (e *LabelFilterExpr) String() string { type LineFmtExpr struct { Value string - implicit } func newLineFmtExpr(value string) *LineFmtExpr { @@ -784,15 +818,12 @@ func newLineFmtExpr(value string) *LineFmtExpr { } type DecolorizeExpr struct { - implicit } func newDecolorizeExpr() *DecolorizeExpr { return &DecolorizeExpr{} } -func (*DecolorizeExpr) isStageExpr() {} - func (e *DecolorizeExpr) Shardable(_ bool) bool { return true } func (e *DecolorizeExpr) Stage() (log.Stage, error) { @@ -807,16 +838,13 @@ func (e *DecolorizeExpr) Walk(f WalkFn) { f(e) } func (e *DecolorizeExpr) Accept(v RootVisitor) { v.VisitDecolorize(e) } type DropLabelsExpr struct { - dropLabels []log.DropLabel - implicit + dropLabels []log.NamedLabelMatcher } -func newDropLabelsExpr(dropLabels []log.DropLabel) *DropLabelsExpr { +func newDropLabelsExpr(dropLabels []log.NamedLabelMatcher) *DropLabelsExpr { return &DropLabelsExpr{dropLabels: dropLabels} } -func (*DropLabelsExpr) isStageExpr() {} - func (e *DropLabelsExpr) Shardable(_ bool) bool { return true } func (e *DropLabelsExpr) Stage() (log.Stage, error) { @@ -850,16 +878,13 @@ func (e *DropLabelsExpr) Walk(f WalkFn) { f(e) } func (e *DropLabelsExpr) Accept(v RootVisitor) { v.VisitDropLabels(e) } type KeepLabelsExpr struct { - keepLabels []log.KeepLabel - implicit + keepLabels []log.NamedLabelMatcher } -func newKeepLabelsExpr(keepLabels []log.KeepLabel) *KeepLabelsExpr { +func newKeepLabelsExpr(keepLabels []log.NamedLabelMatcher) *KeepLabelsExpr { return &KeepLabelsExpr{keepLabels: keepLabels} } -func (*KeepLabelsExpr) isStageExpr() {} - func (e *KeepLabelsExpr) Shardable(_ bool) bool { return true } func (e *KeepLabelsExpr) Stage() (log.Stage, error) { @@ -893,8 +918,6 @@ func (e *KeepLabelsExpr) Walk(f WalkFn) { f(e) } func (e *KeepLabelsExpr) Accept(v RootVisitor) { v.VisitKeepLabel(e) } -func (*LineFmtExpr) isStageExpr() {} - func (e *LineFmtExpr) Shardable(_ bool) bool { return true } func (e *LineFmtExpr) Walk(f WalkFn) { f(e) } @@ -911,7 +934,6 @@ func (e *LineFmtExpr) String() string { type LabelFmtExpr struct { Formats []log.LabelFmt - implicit } func newLabelFmtExpr(fmts []log.LabelFmt) *LabelFmtExpr { @@ -920,8 +942,6 @@ func newLabelFmtExpr(fmts []log.LabelFmt) *LabelFmtExpr { } } -func (*LabelFmtExpr) isStageExpr() {} - func (e *LabelFmtExpr) Shardable(_ bool) bool { // While LabelFmt is shardable in certain cases, it is not always, // but this is left to the shardmapper to determine @@ -956,31 +976,27 @@ func (e *LabelFmtExpr) String() string { return sb.String() } -type JSONExpressionParser struct { +type JSONExpressionParserExpr struct { Expressions []log.LabelExtractionExpr - - implicit } -func newJSONExpressionParser(expressions []log.LabelExtractionExpr) *JSONExpressionParser { - return &JSONExpressionParser{ +func 
newJSONExpressionParser(expressions []log.LabelExtractionExpr) *JSONExpressionParserExpr { + return &JSONExpressionParserExpr{ Expressions: expressions, } } -func (*JSONExpressionParser) isStageExpr() {} - -func (j *JSONExpressionParser) Shardable(_ bool) bool { return true } +func (j *JSONExpressionParserExpr) Shardable(_ bool) bool { return true } -func (j *JSONExpressionParser) Walk(f WalkFn) { f(j) } +func (j *JSONExpressionParserExpr) Walk(f WalkFn) { f(j) } -func (j *JSONExpressionParser) Accept(v RootVisitor) { v.VisitJSONExpressionParser(j) } +func (j *JSONExpressionParserExpr) Accept(v RootVisitor) { v.VisitJSONExpressionParser(j) } -func (j *JSONExpressionParser) Stage() (log.Stage, error) { +func (j *JSONExpressionParserExpr) Stage() (log.Stage, error) { return log.NewJSONExpressionParser(j.Expressions) } -func (j *JSONExpressionParser) String() string { +func (j *JSONExpressionParserExpr) String() string { var sb strings.Builder sb.WriteString(fmt.Sprintf("%s %s ", OpPipe, OpParserTypeJSON)) for i, exp := range j.Expressions { @@ -1000,15 +1016,13 @@ type internedStringSet map[string]struct { ok bool } -type LogfmtExpressionParser struct { +type LogfmtExpressionParserExpr struct { Expressions []log.LabelExtractionExpr Strict, KeepEmpty bool - - implicit } -func newLogfmtExpressionParser(expressions []log.LabelExtractionExpr, flags []string) *LogfmtExpressionParser { - e := LogfmtExpressionParser{ +func newLogfmtExpressionParser(expressions []log.LabelExtractionExpr, flags []string) *LogfmtExpressionParserExpr { + e := LogfmtExpressionParserExpr{ Expressions: expressions, } @@ -1024,19 +1038,17 @@ func newLogfmtExpressionParser(expressions []log.LabelExtractionExpr, flags []st return &e } -func (*LogfmtExpressionParser) isStageExpr() {} - -func (l *LogfmtExpressionParser) Shardable(_ bool) bool { return true } +func (l *LogfmtExpressionParserExpr) Shardable(_ bool) bool { return true } -func (l *LogfmtExpressionParser) Walk(f WalkFn) { f(l) } +func (l *LogfmtExpressionParserExpr) Walk(f WalkFn) { f(l) } -func (l *LogfmtExpressionParser) Accept(v RootVisitor) { v.VisitLogfmtExpressionParser(l) } +func (l *LogfmtExpressionParserExpr) Accept(v RootVisitor) { v.VisitLogfmtExpressionParser(l) } -func (l *LogfmtExpressionParser) Stage() (log.Stage, error) { +func (l *LogfmtExpressionParserExpr) Stage() (log.Stage, error) { return log.NewLogfmtExpressionParser(l.Expressions, l.Strict) } -func (l *LogfmtExpressionParser) String() string { +func (l *LogfmtExpressionParserExpr) String() string { var sb strings.Builder sb.WriteString(fmt.Sprintf("%s %s ", OpPipe, OpParserTypeLogfmt)) if l.Strict { @@ -1122,18 +1134,15 @@ func newUnwrapExpr(id string, operation string) *UnwrapExpr { return &UnwrapExpr{Identifier: id, Operation: operation} } -type LogRange struct { +type LogRangeExpr struct { Left LogSelectorExpr Interval time.Duration Offset time.Duration - - Unwrap *UnwrapExpr - - implicit + Unwrap *UnwrapExpr } // impls Stringer -func (r LogRange) String() string { +func (r LogRangeExpr) String() string { var sb strings.Builder sb.WriteString(r.Left.String()) if r.Unwrap != nil { @@ -1147,38 +1156,38 @@ func (r LogRange) String() string { return sb.String() } -func (r *LogRange) Shardable(topLevel bool) bool { return r.Left.Shardable(topLevel) } +func (r *LogRangeExpr) Shardable(topLevel bool) bool { return r.Left.Shardable(topLevel) } -func (r *LogRange) Walk(f WalkFn) { +func (r *LogRangeExpr) Walk(f WalkFn) { f(r) if r.Left != nil { r.Left.Walk(f) } } -func (r *LogRange) Accept(v 
RootVisitor) { +func (r *LogRangeExpr) Accept(v RootVisitor) { v.VisitLogRange(r) } // WithoutUnwrap returns a copy of the log range without the unwrap statement. -func (r *LogRange) WithoutUnwrap() (*LogRange, error) { +func (r *LogRangeExpr) WithoutUnwrap() (*LogRangeExpr, error) { left, err := Clone(r.Left) if err != nil { return nil, err } - return &LogRange{ + return &LogRangeExpr{ Left: left, Interval: r.Interval, Offset: r.Offset, }, nil } -func newLogRange(left LogSelectorExpr, interval time.Duration, u *UnwrapExpr, o *OffsetExpr) *LogRange { +func newLogRange(left LogSelectorExpr, interval time.Duration, u *UnwrapExpr, o *OffsetExpr) *LogRangeExpr { var offset time.Duration if o != nil { offset = o.Offset } - return &LogRange{ + return &LogRangeExpr{ Left: left, Interval: interval, Unwrap: u, @@ -1330,31 +1339,18 @@ func IsLogicalBinOp(op string) bool { } } -// SampleExpr is a LogQL expression filtering logs and returning metric samples. -// -//sumtype:decl -type SampleExpr interface { - // Selector is the LogQL selector to apply when retrieving logs. - Selector() (LogSelectorExpr, error) - Extractor() (SampleExtractor, error) - MatcherGroups() ([]MatcherRange, error) - Expr - isSampleExpr() -} - // RangeAggregationExpr not all range vector aggregation expressions support grouping by/without label(s), // therefore the Grouping struct can be nil. type RangeAggregationExpr struct { - Left *LogRange + Left *LogRangeExpr Operation string Params *float64 Grouping *Grouping err error - implicit } -func newRangeAggregationExpr(left *LogRange, operation string, gr *Grouping, stringParams *string) SampleExpr { +func newRangeAggregationExpr(left *LogRangeExpr, operation string, gr *Grouping, stringParams *string) SampleExpr { var params *float64 if stringParams != nil { if operation != OpRangeTypeQuantile && operation != OpRangeTypeQuantileSketch { @@ -1383,7 +1379,6 @@ func newRangeAggregationExpr(left *LogRange, operation string, gr *Grouping, str } return e } -func (e *RangeAggregationExpr) isSampleExpr() {} func (e *RangeAggregationExpr) Selector() (LogSelectorExpr, error) { if e.err != nil { @@ -1529,7 +1524,6 @@ type VectorAggregationExpr struct { Params int `json:"params"` Operation string `json:"operation"` err error - implicit } func mustNewVectorAggregationExpr(left SampleExpr, operation string, gr *Grouping, params *string) SampleExpr { @@ -1567,8 +1561,6 @@ func mustNewVectorAggregationExpr(left SampleExpr, operation string, gr *Groupin } } -func (e *VectorAggregationExpr) isSampleExpr() {} - func (e *VectorAggregationExpr) MatcherGroups() ([]MatcherRange, error) { if e.err != nil { return nil, e.err @@ -2113,7 +2105,6 @@ func MergeBinOp(op string, left, right *promql.Sample, swap, filter, isVectorCom type LiteralExpr struct { Val float64 `json:"val"` err error - implicit } func mustNewLiteralExpr(s string, invert bool) *LiteralExpr { @@ -2139,8 +2130,6 @@ func (e *LiteralExpr) String() string { // literlExpr impls SampleExpr & LogSelectorExpr mainly to reduce the need for more complicated typings // to facilitate sum types. We'll be type switching when evaluating them anyways // and they will only be present in binary operation legs. 
-func (e *LiteralExpr) isSampleExpr() {} -func (e *LiteralExpr) isLogSelectorExpr() {} func (e *LiteralExpr) Selector() (LogSelectorExpr, error) { return e, e.err } func (e *LiteralExpr) HasFilter() bool { return false } func (e *LiteralExpr) Shardable(_ bool) bool { return true } @@ -2186,8 +2175,6 @@ type LabelReplaceExpr struct { Regex string Re *regexp.Regexp err error - - implicit } func mustNewLabelReplaceExpr(left SampleExpr, dst, replacement, src, regex string) *LabelReplaceExpr { @@ -2207,8 +2194,6 @@ func mustNewLabelReplaceExpr(left SampleExpr, dst, replacement, src, regex strin } } -func (e *LabelReplaceExpr) isSampleExpr() {} - func (e *LabelReplaceExpr) Selector() (LogSelectorExpr, error) { if e.err != nil { return nil, e.err @@ -2334,7 +2319,6 @@ func MatcherGroups(expr Expr) ([]MatcherRange, error) { type VectorExpr struct { Val float64 err error - implicit } func NewVectorExpr(scalar string) *VectorExpr { @@ -2348,9 +2332,6 @@ func NewVectorExpr(scalar string) *VectorExpr { } } -func (e *VectorExpr) isSampleExpr() {} -func (e *VectorExpr) isLogSelectorExpr() {} - func (e *VectorExpr) Err() error { return e.err } diff --git a/pkg/logql/syntax/ast_test.go b/pkg/logql/syntax/ast_test.go index ea155520edad5..42dcbc897782e 100644 --- a/pkg/logql/syntax/ast_test.go +++ b/pkg/logql/syntax/ast_test.go @@ -636,7 +636,7 @@ func TestOrLineFilterTypes(t *testing.T) { left := &LineFilterExpr{LineFilter: LineFilter{Ty: tt.ty, Match: "something"}} right := &LineFilterExpr{LineFilter: LineFilter{Ty: log.LineMatchEqual, Match: "something"}} - _ = newOrLineFilter(left, right) + _ = newOrLineFilterExpr(left, right) require.Equal(t, tt.ty, right.Ty) require.Equal(t, tt.ty, left.Ty) }) @@ -646,7 +646,7 @@ func TestOrLineFilterTypes(t *testing.T) { f2 := &LineFilterExpr{LineFilter: LineFilter{Ty: log.LineMatchEqual, Match: "something"}} f3 := &LineFilterExpr{LineFilter: LineFilter{Ty: log.LineMatchEqual, Match: "something"}} - _ = newOrLineFilter(f1, newOrLineFilter(f2, f3)) + _ = newOrLineFilterExpr(f1, newOrLineFilterExpr(f2, f3)) require.Equal(t, tt.ty, f1.Ty) require.Equal(t, tt.ty, f2.Ty) require.Equal(t, tt.ty, f3.Ty) @@ -887,7 +887,7 @@ func Test_parserExpr_Parser(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - var e *LabelParserExpr + var e *LineParserExpr if tt.wantPanic { require.Panics(t, func() { e = newLabelParserExpr(tt.op, tt.param) }) return @@ -923,7 +923,7 @@ func Test_parserExpr_String(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - l := LabelParserExpr{ + l := LineParserExpr{ Op: tt.op, Param: tt.param, } diff --git a/pkg/logql/syntax/clone.go b/pkg/logql/syntax/clone.go index d047218b0b607..17b1852cb6555 100644 --- a/pkg/logql/syntax/clone.go +++ b/pkg/logql/syntax/clone.go @@ -66,7 +66,7 @@ func (v *cloneVisitor) VisitVectorAggregation(e *VectorAggregationExpr) { func (v *cloneVisitor) VisitRangeAggregation(e *RangeAggregationExpr) { copied := &RangeAggregationExpr{ - Left: MustClone[*LogRange](e.Left), + Left: MustClone[*LogRangeExpr](e.Left), Operation: e.Operation, } @@ -95,8 +95,8 @@ func (v *cloneVisitor) VisitVector(e *VectorExpr) { v.cloned = &VectorExpr{Val: e.Val} } -func (v *cloneVisitor) VisitLogRange(e *LogRange) { - copied := &LogRange{ +func (v *cloneVisitor) VisitLogRange(e *LogRangeExpr) { + copied := &LogRangeExpr{ Left: MustClone[LogSelectorExpr](e.Left), Interval: e.Interval, Offset: e.Offset, @@ -146,21 +146,21 @@ func (v *cloneVisitor) VisitDecolorize(*DecolorizeExpr) { func (v 
*cloneVisitor) VisitDropLabels(e *DropLabelsExpr) { copied := &DropLabelsExpr{ - dropLabels: make([]log.DropLabel, len(e.dropLabels)), + dropLabels: make([]log.NamedLabelMatcher, len(e.dropLabels)), } for i, l := range e.dropLabels { var matcher *labels.Matcher if l.Matcher != nil { matcher = labels.MustNewMatcher(l.Matcher.Type, l.Matcher.Name, l.Matcher.Value) } - copied.dropLabels[i] = log.NewDropLabel(matcher, l.Name) + copied.dropLabels[i] = log.NewNamedLabelMatcher(matcher, l.Name) } v.cloned = copied } -func (v *cloneVisitor) VisitJSONExpressionParser(e *JSONExpressionParser) { - copied := &JSONExpressionParser{ +func (v *cloneVisitor) VisitJSONExpressionParser(e *JSONExpressionParserExpr) { + copied := &JSONExpressionParserExpr{ Expressions: make([]log.LabelExtractionExpr, len(e.Expressions)), } copy(copied.Expressions, e.Expressions) @@ -170,10 +170,10 @@ func (v *cloneVisitor) VisitJSONExpressionParser(e *JSONExpressionParser) { func (v *cloneVisitor) VisitKeepLabel(e *KeepLabelsExpr) { copied := &KeepLabelsExpr{ - keepLabels: make([]log.KeepLabel, len(e.keepLabels)), + keepLabels: make([]log.NamedLabelMatcher, len(e.keepLabels)), } for i, k := range e.keepLabels { - copied.keepLabels[i] = log.KeepLabel{ + copied.keepLabels[i] = log.NamedLabelMatcher{ Name: k.Name, } if k.Matcher != nil { @@ -251,8 +251,8 @@ func (v *cloneVisitor) VisitLabelFmt(e *LabelFmtExpr) { v.cloned = copied } -func (v *cloneVisitor) VisitLabelParser(e *LabelParserExpr) { - v.cloned = &LabelParserExpr{ +func (v *cloneVisitor) VisitLabelParser(e *LineParserExpr) { + v.cloned = &LineParserExpr{ Op: e.Op, Param: e.Param, } @@ -283,8 +283,8 @@ func (v *cloneVisitor) VisitLineFmt(e *LineFmtExpr) { v.cloned = &LineFmtExpr{Value: e.Value} } -func (v *cloneVisitor) VisitLogfmtExpressionParser(e *LogfmtExpressionParser) { - copied := &LogfmtExpressionParser{ +func (v *cloneVisitor) VisitLogfmtExpressionParser(e *LogfmtExpressionParserExpr) { + copied := &LogfmtExpressionParserExpr{ Expressions: make([]log.LabelExtractionExpr, len(e.Expressions)), Strict: e.Strict, KeepEmpty: e.KeepEmpty, diff --git a/pkg/logql/syntax/expr.y.go b/pkg/logql/syntax/expr.y.go deleted file mode 100644 index 85070b1c2b501..0000000000000 --- a/pkg/logql/syntax/expr.y.go +++ /dev/null @@ -1,2046 +0,0 @@ -// Code generated by goyacc -p expr -o pkg/logql/syntax/expr.y.go pkg/logql/syntax/expr.y. DO NOT EDIT. 
- -package syntax - -import __yyfmt__ "fmt" - - -import ( - "github.com/grafana/loki/v3/pkg/logql/log" - "github.com/prometheus/prometheus/model/labels" - "time" -) - -type exprSymType struct { - yys int - Expr Expr - Filter log.LineMatchType - Grouping *Grouping - Labels []string - LogExpr LogSelectorExpr - LogRangeExpr *LogRange - Matcher *labels.Matcher - Matchers []*labels.Matcher - RangeAggregationExpr SampleExpr - RangeOp string - ConvOp string - Selector []*labels.Matcher - VectorAggregationExpr SampleExpr - VectorExpr *VectorExpr - Vector string - MetricExpr SampleExpr - VectorOp string - FilterOp string - BinOpExpr SampleExpr - LabelReplaceExpr SampleExpr - binOp string - bytes uint64 - str string - duration time.Duration - LiteralExpr *LiteralExpr - BinOpModifier *BinOpOptions - BoolModifier *BinOpOptions - OnOrIgnoringModifier *BinOpOptions - LabelParser *LabelParserExpr - LogfmtParser *LogfmtParserExpr - LineFilters *LineFilterExpr - LineFilter *LineFilterExpr - OrFilter *LineFilterExpr - ParserFlags []string - PipelineExpr MultiStageExpr - PipelineStage StageExpr - BytesFilter log.LabelFilterer - NumberFilter log.LabelFilterer - DurationFilter log.LabelFilterer - LabelFilter log.LabelFilterer - UnitFilter log.LabelFilterer - IPLabelFilter log.LabelFilterer - LineFormatExpr *LineFmtExpr - LabelFormatExpr *LabelFmtExpr - LabelFormat log.LabelFmt - LabelsFormat []log.LabelFmt - - LabelExtractionExpression log.LabelExtractionExpr - LabelExtractionExpressionList []log.LabelExtractionExpr - JSONExpressionParser *JSONExpressionParser - LogfmtExpressionParser *LogfmtExpressionParser - - UnwrapExpr *UnwrapExpr - DecolorizeExpr *DecolorizeExpr - OffsetExpr *OffsetExpr - DropLabel log.DropLabel - DropLabels []log.DropLabel - DropLabelsExpr *DropLabelsExpr - KeepLabel log.KeepLabel - KeepLabels []log.KeepLabel - KeepLabelsExpr *KeepLabelsExpr -} - -const BYTES = 57346 -const IDENTIFIER = 57347 -const STRING = 57348 -const NUMBER = 57349 -const PARSER_FLAG = 57350 -const DURATION = 57351 -const RANGE = 57352 -const MATCHERS = 57353 -const LABELS = 57354 -const EQ = 57355 -const RE = 57356 -const NRE = 57357 -const NPA = 57358 -const OPEN_BRACE = 57359 -const CLOSE_BRACE = 57360 -const OPEN_BRACKET = 57361 -const CLOSE_BRACKET = 57362 -const COMMA = 57363 -const DOT = 57364 -const PIPE_MATCH = 57365 -const PIPE_EXACT = 57366 -const PIPE_PATTERN = 57367 -const OPEN_PARENTHESIS = 57368 -const CLOSE_PARENTHESIS = 57369 -const BY = 57370 -const WITHOUT = 57371 -const COUNT_OVER_TIME = 57372 -const RATE = 57373 -const RATE_COUNTER = 57374 -const SUM = 57375 -const SORT = 57376 -const SORT_DESC = 57377 -const AVG = 57378 -const MAX = 57379 -const MIN = 57380 -const COUNT = 57381 -const STDDEV = 57382 -const STDVAR = 57383 -const BOTTOMK = 57384 -const TOPK = 57385 -const APPROX_TOPK = 57386 -const BYTES_OVER_TIME = 57387 -const BYTES_RATE = 57388 -const BOOL = 57389 -const JSON = 57390 -const REGEXP = 57391 -const LOGFMT = 57392 -const PIPE = 57393 -const LINE_FMT = 57394 -const LABEL_FMT = 57395 -const UNWRAP = 57396 -const AVG_OVER_TIME = 57397 -const SUM_OVER_TIME = 57398 -const MIN_OVER_TIME = 57399 -const MAX_OVER_TIME = 57400 -const STDVAR_OVER_TIME = 57401 -const STDDEV_OVER_TIME = 57402 -const QUANTILE_OVER_TIME = 57403 -const BYTES_CONV = 57404 -const DURATION_CONV = 57405 -const DURATION_SECONDS_CONV = 57406 -const FIRST_OVER_TIME = 57407 -const LAST_OVER_TIME = 57408 -const ABSENT_OVER_TIME = 57409 -const VECTOR = 57410 -const LABEL_REPLACE = 57411 -const UNPACK = 57412 -const OFFSET = 
57413 -const PATTERN = 57414 -const IP = 57415 -const ON = 57416 -const IGNORING = 57417 -const GROUP_LEFT = 57418 -const GROUP_RIGHT = 57419 -const DECOLORIZE = 57420 -const DROP = 57421 -const KEEP = 57422 -const OR = 57423 -const AND = 57424 -const UNLESS = 57425 -const CMP_EQ = 57426 -const NEQ = 57427 -const LT = 57428 -const LTE = 57429 -const GT = 57430 -const GTE = 57431 -const ADD = 57432 -const SUB = 57433 -const MUL = 57434 -const DIV = 57435 -const MOD = 57436 -const POW = 57437 - -var exprToknames = [...]string{ - "$end", - "error", - "$unk", - "BYTES", - "IDENTIFIER", - "STRING", - "NUMBER", - "PARSER_FLAG", - "DURATION", - "RANGE", - "MATCHERS", - "LABELS", - "EQ", - "RE", - "NRE", - "NPA", - "OPEN_BRACE", - "CLOSE_BRACE", - "OPEN_BRACKET", - "CLOSE_BRACKET", - "COMMA", - "DOT", - "PIPE_MATCH", - "PIPE_EXACT", - "PIPE_PATTERN", - "OPEN_PARENTHESIS", - "CLOSE_PARENTHESIS", - "BY", - "WITHOUT", - "COUNT_OVER_TIME", - "RATE", - "RATE_COUNTER", - "SUM", - "SORT", - "SORT_DESC", - "AVG", - "MAX", - "MIN", - "COUNT", - "STDDEV", - "STDVAR", - "BOTTOMK", - "TOPK", - "APPROX_TOPK", - "BYTES_OVER_TIME", - "BYTES_RATE", - "BOOL", - "JSON", - "REGEXP", - "LOGFMT", - "PIPE", - "LINE_FMT", - "LABEL_FMT", - "UNWRAP", - "AVG_OVER_TIME", - "SUM_OVER_TIME", - "MIN_OVER_TIME", - "MAX_OVER_TIME", - "STDVAR_OVER_TIME", - "STDDEV_OVER_TIME", - "QUANTILE_OVER_TIME", - "BYTES_CONV", - "DURATION_CONV", - "DURATION_SECONDS_CONV", - "FIRST_OVER_TIME", - "LAST_OVER_TIME", - "ABSENT_OVER_TIME", - "VECTOR", - "LABEL_REPLACE", - "UNPACK", - "OFFSET", - "PATTERN", - "IP", - "ON", - "IGNORING", - "GROUP_LEFT", - "GROUP_RIGHT", - "DECOLORIZE", - "DROP", - "KEEP", - "OR", - "AND", - "UNLESS", - "CMP_EQ", - "NEQ", - "LT", - "LTE", - "GT", - "GTE", - "ADD", - "SUB", - "MUL", - "DIV", - "MOD", - "POW", -} -var exprStatenames = [...]string{} - -const exprEofCode = 1 -const exprErrCode = 2 -const exprInitialStackSize = 16 - - -var exprExca = [...]int{ - -1, 1, - 1, -1, - -2, 0, -} - -const exprPrivate = 57344 - -const exprLast = 644 - -var exprAct = [...]int{ - - 290, 229, 85, 4, 215, 65, 183, 127, 205, 190, - 76, 201, 198, 64, 238, 5, 153, 188, 78, 2, - 57, 81, 49, 50, 51, 58, 59, 62, 63, 60, - 61, 52, 53, 54, 55, 56, 57, 284, 10, 50, - 51, 58, 59, 62, 63, 60, 61, 52, 53, 54, - 55, 56, 57, 58, 59, 62, 63, 60, 61, 52, - 53, 54, 55, 56, 57, 54, 55, 56, 57, 218, - 110, 137, 140, 216, 116, 52, 53, 54, 55, 56, - 57, 267, 293, 222, 16, 141, 266, 185, 157, 149, - 151, 152, 131, 263, 162, 221, 16, 68, 262, 155, - 73, 75, 167, 168, 165, 166, 298, 217, 70, 71, - 72, 164, 208, 151, 152, 169, 170, 171, 172, 173, - 174, 175, 176, 177, 178, 179, 180, 181, 182, 282, - 340, 367, 16, 294, 281, 295, 231, 137, 195, 367, - 143, 192, 95, 203, 207, 86, 87, 186, 184, 340, - 265, 293, 143, 185, 279, 387, 220, 16, 131, 278, - 382, 150, 261, 236, 111, 294, 370, 17, 18, 230, - 74, 295, 232, 233, 295, 307, 241, 142, 375, 17, - 18, 357, 347, 214, 209, 212, 213, 210, 211, 307, - 295, 249, 250, 251, 276, 356, 307, 16, 273, 275, - 341, 16, 355, 272, 270, 253, 295, 16, 84, 269, - 86, 87, 374, 296, 184, 17, 18, 364, 73, 75, - 225, 380, 328, 286, 372, 307, 70, 71, 72, 288, - 291, 354, 297, 240, 300, 360, 110, 303, 116, 304, - 17, 18, 292, 155, 289, 332, 301, 264, 268, 271, - 274, 277, 280, 283, 231, 317, 137, 343, 344, 345, - 311, 313, 316, 318, 319, 350, 137, 203, 207, 326, - 321, 325, 185, 331, 240, 305, 244, 131, 256, 240, - 17, 18, 185, 240, 17, 18, 225, 131, 74, 329, - 17, 18, 333, 307, 335, 337, 315, 339, 110, 309, - 307, 314, 338, 349, 
334, 312, 308, 110, 296, 240, - 351, 302, 240, 73, 75, 234, 145, 225, 154, 144, - 13, 70, 71, 72, 137, 348, 327, 285, 13, 156, - 385, 242, 186, 184, 239, 361, 362, 156, 248, 247, - 110, 363, 226, 246, 245, 131, 219, 365, 366, 231, - 161, 160, 159, 371, 91, 90, 83, 381, 353, 254, - 228, 16, 306, 147, 260, 73, 75, 377, 259, 378, - 379, 13, 257, 70, 71, 72, 243, 299, 235, 146, - 6, 383, 148, 74, 21, 22, 23, 36, 45, 46, - 37, 39, 40, 38, 41, 42, 43, 44, 47, 24, - 25, 231, 227, 82, 258, 255, 369, 368, 336, 26, - 27, 28, 29, 30, 31, 32, 80, 346, 163, 33, - 34, 35, 48, 19, 237, 228, 191, 323, 324, 252, - 73, 75, 89, 191, 13, 74, 189, 88, 70, 71, - 72, 386, 384, 6, 17, 18, 373, 21, 22, 23, - 36, 45, 46, 37, 39, 40, 38, 41, 42, 43, - 44, 47, 24, 25, 3, 359, 231, 358, 330, 320, - 310, 77, 26, 27, 28, 29, 30, 31, 32, 287, - 73, 75, 33, 34, 35, 48, 19, 158, 70, 71, - 72, 224, 322, 73, 75, 199, 376, 13, 223, 222, - 74, 70, 71, 72, 221, 196, 6, 17, 18, 137, - 21, 22, 23, 36, 45, 46, 37, 39, 40, 38, - 41, 42, 43, 44, 47, 24, 25, 194, 193, 67, - 131, 352, 206, 202, 191, 26, 27, 28, 29, 30, - 31, 32, 82, 137, 199, 33, 34, 35, 48, 19, - 74, 128, 123, 124, 122, 129, 132, 134, 298, 73, - 75, 114, 115, 74, 131, 197, 119, 70, 71, 72, - 17, 18, 204, 121, 125, 200, 126, 120, 118, 92, - 117, 187, 133, 135, 136, 66, 123, 124, 122, 138, - 132, 134, 130, 139, 112, 231, 113, 94, 93, 11, - 9, 20, 12, 15, 8, 342, 14, 7, 125, 79, - 126, 69, 1, 0, 0, 293, 133, 135, 136, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 74, - 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, - 106, 107, 108, 109, -} -var exprPact = [...]int{ - - 354, -1000, -59, -1000, -1000, 478, 354, -1000, -1000, -1000, - -1000, -1000, -1000, 398, 330, 182, -1000, 430, 425, 329, - 328, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 95, - 95, 95, 95, 95, 95, 95, 95, 95, 95, 95, - 95, 95, 95, 95, 478, -1000, 465, 538, -9, 79, - -1000, -1000, -1000, -1000, -1000, -1000, 292, 289, -59, 361, - -1000, -1000, 76, 311, 480, 326, 325, 324, -1000, -1000, - 354, 411, 354, 30, 26, -1000, 354, 354, 354, 354, - 354, 354, 354, 354, 354, 354, 354, 354, 354, 354, - -1000, -1000, -1000, -1000, -1000, -1000, 66, -1000, -1000, -1000, - -1000, -1000, 428, 529, 522, -1000, 521, -1000, -1000, -1000, - -1000, 319, 499, -1000, 539, 528, 527, 99, -1000, -1000, - 67, -12, 320, -1000, -1000, -1000, -1000, -1000, 537, 498, - 493, 492, 485, 315, 381, 415, 303, 288, 357, 417, - 307, 304, 355, 249, -43, 318, 317, 313, 312, -31, - -31, -27, -27, -75, -75, -75, -75, -15, -15, -15, - -15, -15, -15, 66, 319, 319, 319, 421, 338, -1000, - -1000, 392, 338, -1000, -1000, 251, -1000, 351, -1000, 391, - 347, -1000, 76, -1000, 343, -1000, 76, -1000, 89, 77, - 200, 194, 190, 150, 125, -1000, -44, 301, 67, 473, - -1000, -1000, -1000, -1000, -1000, -1000, 117, 303, 544, 123, - 203, 504, 350, 284, 117, 354, 248, 341, 279, -1000, - -1000, 272, -1000, 464, -1000, 278, 274, 269, 228, 261, - 66, 132, -1000, 338, 529, 463, -1000, 490, 422, 528, - 527, 300, -1000, -1000, -1000, 196, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, 67, 462, -1000, 246, -1000, 218, - 85, 84, 85, 399, 11, 319, 11, 120, 195, 407, - 155, 298, -1000, -1000, 238, -1000, 354, 526, -1000, -1000, - 337, 204, -1000, 175, -1000, -1000, 168, -1000, 154, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, 461, 459, -1000, 
- 208, -1000, 117, 84, 85, 84, -1000, -1000, 66, -1000, - 11, -1000, 191, -1000, -1000, -1000, 80, 397, 396, 139, - 117, 197, -1000, 440, -1000, -1000, -1000, -1000, 185, 151, - -1000, -1000, 84, -1000, 491, 88, 84, 52, 11, 11, - 211, -1000, -1000, 336, -1000, -1000, 133, 84, -1000, -1000, - 11, 436, -1000, -1000, 309, 435, 128, -1000, -} -var exprPgo = [...]int{ - - 0, 612, 18, 611, 2, 14, 464, 3, 16, 7, - 609, 607, 606, 605, 15, 604, 603, 602, 601, 107, - 600, 38, 599, 579, 598, 597, 596, 594, 13, 5, - 593, 592, 589, 6, 585, 97, 4, 581, 580, 578, - 577, 575, 11, 573, 572, 8, 566, 12, 565, 9, - 17, 562, 561, 1, 555, 551, 0, -} -var exprR1 = [...]int{ - - 0, 1, 2, 2, 7, 7, 7, 7, 7, 7, - 7, 6, 6, 6, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 53, 53, 53, 13, 13, 13, 11, 11, 11, 11, - 15, 15, 15, 15, 15, 15, 22, 3, 3, 3, - 3, 3, 3, 14, 14, 14, 10, 10, 9, 9, - 9, 9, 28, 28, 29, 29, 29, 29, 29, 29, - 29, 29, 29, 29, 29, 19, 36, 36, 36, 35, - 35, 35, 34, 34, 34, 37, 37, 27, 27, 26, - 26, 26, 26, 52, 51, 51, 38, 39, 47, 47, - 48, 48, 48, 46, 33, 33, 33, 33, 33, 33, - 33, 33, 33, 49, 49, 50, 50, 55, 55, 54, - 54, 32, 32, 32, 32, 32, 32, 32, 30, 30, - 30, 30, 30, 30, 30, 31, 31, 31, 31, 31, - 31, 31, 42, 42, 41, 41, 40, 45, 45, 44, - 44, 43, 20, 20, 20, 20, 20, 20, 20, 20, - 20, 20, 20, 20, 20, 20, 20, 24, 24, 25, - 25, 25, 25, 23, 23, 23, 23, 23, 23, 23, - 23, 21, 21, 21, 17, 18, 16, 16, 16, 16, - 16, 16, 16, 16, 16, 16, 16, 16, 12, 12, - 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, - 12, 12, 12, 56, 5, 5, 4, 4, 4, 4, -} -var exprR2 = [...]int{ - - 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 3, 1, 2, 3, 2, 3, 4, 5, 3, 4, - 5, 6, 3, 4, 5, 6, 3, 4, 5, 6, - 4, 5, 6, 7, 3, 4, 4, 5, 3, 2, - 3, 6, 3, 1, 1, 1, 4, 6, 5, 7, - 4, 5, 5, 6, 7, 7, 12, 1, 1, 1, - 1, 1, 1, 3, 3, 2, 1, 3, 3, 3, - 3, 3, 1, 2, 1, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 2, 1, 1, 4, 3, 2, - 5, 4, 1, 3, 2, 1, 2, 1, 2, 1, - 2, 1, 2, 2, 3, 2, 2, 1, 3, 3, - 1, 3, 3, 2, 1, 1, 1, 1, 3, 2, - 3, 3, 3, 3, 1, 1, 3, 6, 6, 1, - 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 1, 1, 1, 3, 2, 1, 1, 1, - 3, 2, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 0, 1, 5, - 4, 5, 4, 1, 1, 2, 4, 5, 2, 4, - 5, 1, 2, 2, 4, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 2, 1, 3, 4, 4, 3, 3, -} -var exprChk = [...]int{ - - -1000, -1, -2, -6, -7, -14, 26, -11, -15, -20, - -21, -22, -17, 17, -12, -16, 7, 90, 91, 69, - -18, 30, 31, 32, 45, 46, 55, 56, 57, 58, - 59, 60, 61, 65, 66, 67, 33, 36, 39, 37, - 38, 40, 41, 42, 43, 34, 35, 44, 68, 81, - 82, 83, 90, 91, 92, 93, 94, 95, 84, 85, - 88, 89, 86, 87, -28, -29, -34, 51, -35, -3, - 23, 24, 25, 15, 85, 16, -7, -6, -2, -10, - 18, -9, 5, 26, 26, -4, 28, 29, 7, 7, - 26, 26, -23, -24, -25, 47, -23, -23, -23, -23, - -23, -23, -23, -23, -23, -23, -23, -23, -23, -23, - -29, -35, -27, -26, -52, -51, -33, -38, -39, -46, - -40, -43, 50, 48, 49, 70, 72, -9, -55, -54, - -31, 26, 52, 78, 53, 79, 80, 5, -32, -30, - 81, 6, -19, 73, 27, 27, 18, 2, 21, 13, - 85, 14, 15, -8, 7, -14, 26, -7, 7, 26, - 26, 26, -7, 7, -2, 74, 75, 76, 77, -2, - -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, - -2, -2, -2, -33, 82, 21, 81, -37, -50, 8, - -49, 5, -50, 6, 6, -33, 6, -48, -47, 5, - -41, -42, 5, -9, -44, -45, 5, -9, 13, 85, - 88, 89, 86, 87, 84, -36, 6, -19, 81, 26, - -9, 6, 6, 6, 6, 2, 27, 21, 10, -53, - -28, 51, -14, -8, 27, 21, -7, 7, -5, 27, - 5, -5, 27, 21, 27, 26, 26, 26, 26, -33, - -33, -33, 8, -50, 21, 13, 27, 21, 13, 21, - 21, 73, 9, 4, -21, 73, 9, 4, -21, 9, - 4, 
-21, 9, 4, -21, 9, 4, -21, 9, 4, - -21, 9, 4, -21, 81, 26, -36, 6, -4, -8, - -56, -53, -28, 71, 10, 51, 10, -53, 54, 27, - -53, -28, 27, -4, -7, 27, 21, 21, 27, 27, - 6, -5, 27, -5, 27, 27, -5, 27, -5, -49, - 6, -47, 2, 5, 6, -42, -45, 26, 26, -36, - 6, 27, 27, -53, -28, -53, 9, -56, -33, -56, - 10, 5, -13, 62, 63, 64, 10, 27, 27, -53, - 27, -7, 5, 21, 27, 27, 27, 27, 6, 6, - 27, -4, -53, -56, 26, -56, -53, 51, 10, 10, - 27, -4, 27, 6, 27, 27, 5, -53, -56, -56, - 10, 21, 27, -56, 6, 21, 6, 27, -} -var exprDef = [...]int{ - - 0, -2, 1, 2, 3, 11, 0, 4, 5, 6, - 7, 8, 9, 0, 0, 0, 191, 0, 0, 0, - 0, 208, 209, 210, 211, 212, 213, 214, 215, 216, - 217, 218, 219, 220, 221, 222, 196, 197, 198, 199, - 200, 201, 202, 203, 204, 205, 206, 207, 195, 177, - 177, 177, 177, 177, 177, 177, 177, 177, 177, 177, - 177, 177, 177, 177, 12, 72, 74, 0, 92, 0, - 57, 58, 59, 60, 61, 62, 3, 2, 0, 0, - 65, 66, 0, 0, 0, 0, 0, 0, 192, 193, - 0, 0, 0, 183, 184, 178, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 73, 94, 75, 76, 77, 78, 79, 80, 81, 82, - 83, 84, 97, 99, 0, 101, 0, 114, 115, 116, - 117, 0, 0, 107, 0, 0, 0, 0, 129, 130, - 0, 89, 0, 85, 10, 13, 63, 64, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 3, 191, 0, - 0, 0, 3, 0, 162, 0, 0, 185, 188, 163, - 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, - 174, 175, 176, 119, 0, 0, 0, 98, 105, 95, - 125, 124, 103, 100, 102, 0, 106, 113, 110, 0, - 156, 154, 152, 153, 161, 159, 157, 158, 0, 0, - 0, 0, 0, 0, 0, 93, 86, 0, 0, 0, - 67, 68, 69, 70, 71, 39, 46, 0, 14, 0, - 0, 0, 0, 0, 50, 0, 3, 191, 0, 228, - 224, 0, 229, 0, 194, 0, 0, 0, 0, 120, - 121, 122, 96, 104, 0, 0, 118, 0, 0, 0, - 0, 0, 136, 143, 150, 0, 135, 142, 149, 131, - 138, 145, 132, 139, 146, 133, 140, 147, 134, 141, - 148, 137, 144, 151, 0, 0, 91, 0, 48, 0, - 15, 18, 34, 0, 22, 0, 26, 0, 0, 0, - 0, 0, 38, 52, 3, 51, 0, 0, 226, 227, - 0, 0, 180, 0, 182, 186, 0, 189, 0, 126, - 123, 111, 112, 108, 109, 155, 160, 0, 0, 88, - 0, 90, 47, 19, 35, 36, 223, 23, 42, 27, - 30, 40, 0, 43, 44, 45, 16, 0, 0, 0, - 53, 3, 225, 0, 179, 181, 187, 190, 0, 0, - 87, 49, 37, 31, 0, 17, 20, 0, 24, 28, - 0, 54, 55, 0, 127, 128, 0, 21, 25, 29, - 32, 0, 41, 33, 0, 0, 0, 56, -} -var exprTok1 = [...]int{ - - 1, -} -var exprTok2 = [...]int{ - - 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, - 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, - 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, - 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, - 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, - 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, - 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, - 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, - 92, 93, 94, 95, -} -var exprTok3 = [...]int{ - 0, -} - -var exprErrorMessages = [...]struct { - state int - token int - msg string -}{} - - -/* parser for yacc output */ - -var ( - exprDebug = 0 - exprErrorVerbose = false -) - -type exprLexer interface { - Lex(lval *exprSymType) int - Error(s string) -} - -type exprParser interface { - Parse(exprLexer) int - Lookahead() int -} - -type exprParserImpl struct { - lval exprSymType - stack [exprInitialStackSize]exprSymType - char int -} - -func (p *exprParserImpl) Lookahead() int { - return p.char -} - -func exprNewParser() exprParser { - return &exprParserImpl{} -} - -const exprFlag = -1000 - -func exprTokname(c int) string { - if c >= 1 && c-1 < len(exprToknames) { - if exprToknames[c-1] != "" { - return exprToknames[c-1] - } - } - return __yyfmt__.Sprintf("tok-%v", c) -} - -func exprStatname(s int) string { - if s >= 0 && s < len(exprStatenames) { - if exprStatenames[s] != "" { - return 
exprStatenames[s] - } - } - return __yyfmt__.Sprintf("state-%v", s) -} - -func exprErrorMessage(state, lookAhead int) string { - const TOKSTART = 4 - - if !exprErrorVerbose { - return "syntax error" - } - - for _, e := range exprErrorMessages { - if e.state == state && e.token == lookAhead { - return "syntax error: " + e.msg - } - } - - res := "syntax error: unexpected " + exprTokname(lookAhead) - - // To match Bison, suggest at most four expected tokens. - expected := make([]int, 0, 4) - - // Look for shiftable tokens. - base := exprPact[state] - for tok := TOKSTART; tok-1 < len(exprToknames); tok++ { - if n := base + tok; n >= 0 && n < exprLast && exprChk[exprAct[n]] == tok { - if len(expected) == cap(expected) { - return res - } - expected = append(expected, tok) - } - } - - if exprDef[state] == -2 { - i := 0 - for exprExca[i] != -1 || exprExca[i+1] != state { - i += 2 - } - - // Look for tokens that we accept or reduce. - for i += 2; exprExca[i] >= 0; i += 2 { - tok := exprExca[i] - if tok < TOKSTART || exprExca[i+1] == 0 { - continue - } - if len(expected) == cap(expected) { - return res - } - expected = append(expected, tok) - } - - // If the default action is to accept or reduce, give up. - if exprExca[i+1] != 0 { - return res - } - } - - for i, tok := range expected { - if i == 0 { - res += ", expecting " - } else { - res += " or " - } - res += exprTokname(tok) - } - return res -} - -func exprlex1(lex exprLexer, lval *exprSymType) (char, token int) { - token = 0 - char = lex.Lex(lval) - if char <= 0 { - token = exprTok1[0] - goto out - } - if char < len(exprTok1) { - token = exprTok1[char] - goto out - } - if char >= exprPrivate { - if char < exprPrivate+len(exprTok2) { - token = exprTok2[char-exprPrivate] - goto out - } - } - for i := 0; i < len(exprTok3); i += 2 { - token = exprTok3[i+0] - if token == char { - token = exprTok3[i+1] - goto out - } - } - -out: - if token == 0 { - token = exprTok2[1] /* unknown char */ - } - if exprDebug >= 3 { - __yyfmt__.Printf("lex %s(%d)\n", exprTokname(token), uint(char)) - } - return char, token -} - -func exprParse(exprlex exprLexer) int { - return exprNewParser().Parse(exprlex) -} - -func (exprrcvr *exprParserImpl) Parse(exprlex exprLexer) int { - var exprn int - var exprVAL exprSymType - var exprDollar []exprSymType - _ = exprDollar // silence set and not used - exprS := exprrcvr.stack[:] - - Nerrs := 0 /* number of errors */ - Errflag := 0 /* error recovery flag */ - exprstate := 0 - exprrcvr.char = -1 - exprtoken := -1 // exprrcvr.char translated into internal numbering - defer func() { - // Make sure we report no lookahead when not parsing. 
- exprstate = -1 - exprrcvr.char = -1 - exprtoken = -1 - }() - exprp := -1 - goto exprstack - -ret0: - return 0 - -ret1: - return 1 - -exprstack: - /* put a state and value onto the stack */ - if exprDebug >= 4 { - __yyfmt__.Printf("char %v in %v\n", exprTokname(exprtoken), exprStatname(exprstate)) - } - - exprp++ - if exprp >= len(exprS) { - nyys := make([]exprSymType, len(exprS)*2) - copy(nyys, exprS) - exprS = nyys - } - exprS[exprp] = exprVAL - exprS[exprp].yys = exprstate - -exprnewstate: - exprn = exprPact[exprstate] - if exprn <= exprFlag { - goto exprdefault /* simple state */ - } - if exprrcvr.char < 0 { - exprrcvr.char, exprtoken = exprlex1(exprlex, &exprrcvr.lval) - } - exprn += exprtoken - if exprn < 0 || exprn >= exprLast { - goto exprdefault - } - exprn = exprAct[exprn] - if exprChk[exprn] == exprtoken { /* valid shift */ - exprrcvr.char = -1 - exprtoken = -1 - exprVAL = exprrcvr.lval - exprstate = exprn - if Errflag > 0 { - Errflag-- - } - goto exprstack - } - -exprdefault: - /* default state action */ - exprn = exprDef[exprstate] - if exprn == -2 { - if exprrcvr.char < 0 { - exprrcvr.char, exprtoken = exprlex1(exprlex, &exprrcvr.lval) - } - - /* look through exception table */ - xi := 0 - for { - if exprExca[xi+0] == -1 && exprExca[xi+1] == exprstate { - break - } - xi += 2 - } - for xi += 2; ; xi += 2 { - exprn = exprExca[xi+0] - if exprn < 0 || exprn == exprtoken { - break - } - } - exprn = exprExca[xi+1] - if exprn < 0 { - goto ret0 - } - } - if exprn == 0 { - /* error ... attempt to resume parsing */ - switch Errflag { - case 0: /* brand new error */ - exprlex.Error(exprErrorMessage(exprstate, exprtoken)) - Nerrs++ - if exprDebug >= 1 { - __yyfmt__.Printf("%s", exprStatname(exprstate)) - __yyfmt__.Printf(" saw %s\n", exprTokname(exprtoken)) - } - fallthrough - - case 1, 2: /* incompletely recovered error ... try again */ - Errflag = 3 - - /* find a state where "error" is a legal shift action */ - for exprp >= 0 { - exprn = exprPact[exprS[exprp].yys] + exprErrCode - if exprn >= 0 && exprn < exprLast { - exprstate = exprAct[exprn] /* simulate a shift of "error" */ - if exprChk[exprstate] == exprErrCode { - goto exprstack - } - } - - /* the current p has no shift on "error", pop stack */ - if exprDebug >= 2 { - __yyfmt__.Printf("error recovery pops state %d\n", exprS[exprp].yys) - } - exprp-- - } - /* there is no state on the stack with an error shift ... abort */ - goto ret1 - - case 3: /* no shift yet; clobber input char */ - if exprDebug >= 2 { - __yyfmt__.Printf("error recovery discards %s\n", exprTokname(exprtoken)) - } - if exprtoken == exprEofCode { - goto ret1 - } - exprrcvr.char = -1 - exprtoken = -1 - goto exprnewstate /* try again in the same state */ - } - } - - /* reduction by production exprn */ - if exprDebug >= 2 { - __yyfmt__.Printf("reduce %v in:\n\t%v\n", exprn, exprStatname(exprstate)) - } - - exprnt := exprn - exprpt := exprp - _ = exprpt // guard against "declared and not used" - - exprp -= exprR2[exprn] - // exprp is now the index of $0. Perform the default action. Iff the - // reduced production is ε, $1 is possibly out of range. 
- if exprp+1 >= len(exprS) { - nyys := make([]exprSymType, len(exprS)*2) - copy(nyys, exprS) - exprS = nyys - } - exprVAL = exprS[exprp+1] - - /* consult goto table to find next state */ - exprn = exprR1[exprn] - exprg := exprPgo[exprn] - exprj := exprg + exprS[exprp].yys + 1 - - if exprj >= exprLast { - exprstate = exprAct[exprg] - } else { - exprstate = exprAct[exprj] - if exprChk[exprstate] != -exprn { - exprstate = exprAct[exprg] - } - } - // dummy call; replaced with literal code - switch exprnt { - - case 1: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprlex.(*parser).expr = exprDollar[1].Expr - } - case 2: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.Expr = exprDollar[1].LogExpr - } - case 3: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.Expr = exprDollar[1].MetricExpr - } - case 4: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.MetricExpr = exprDollar[1].RangeAggregationExpr - } - case 5: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.MetricExpr = exprDollar[1].VectorAggregationExpr - } - case 6: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.MetricExpr = exprDollar[1].BinOpExpr - } - case 7: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.MetricExpr = exprDollar[1].LiteralExpr - } - case 8: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.MetricExpr = exprDollar[1].LabelReplaceExpr - } - case 9: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.MetricExpr = exprDollar[1].VectorExpr - } - case 10: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.MetricExpr = exprDollar[2].MetricExpr - } - case 11: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.LogExpr = newMatcherExpr(exprDollar[1].Selector) - } - case 12: - exprDollar = exprS[exprpt-2 : exprpt+1] - { - exprVAL.LogExpr = newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].PipelineExpr) - } - case 13: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.LogExpr = exprDollar[2].LogExpr - } - case 14: - exprDollar = exprS[exprpt-2 : exprpt+1] - { - exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].duration, nil, nil) - } - case 15: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].duration, nil, exprDollar[3].OffsetExpr) - } - case 16: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[2].Selector), exprDollar[4].duration, nil, nil) - } - case 17: - exprDollar = exprS[exprpt-5 : exprpt+1] - { - exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[2].Selector), exprDollar[4].duration, nil, exprDollar[5].OffsetExpr) - } - case 18: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].duration, exprDollar[3].UnwrapExpr, nil) - } - case 19: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].duration, exprDollar[4].UnwrapExpr, exprDollar[3].OffsetExpr) - } - case 20: - exprDollar = exprS[exprpt-5 : exprpt+1] - { - exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[2].Selector), exprDollar[4].duration, exprDollar[5].UnwrapExpr, nil) - } - case 21: - exprDollar = exprS[exprpt-6 : exprpt+1] - { - exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[2].Selector), exprDollar[4].duration, exprDollar[6].UnwrapExpr, exprDollar[5].OffsetExpr) - } - case 22: - 
exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[1].Selector), exprDollar[3].duration, exprDollar[2].UnwrapExpr, nil) - } - case 23: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[1].Selector), exprDollar[3].duration, exprDollar[2].UnwrapExpr, exprDollar[4].OffsetExpr) - } - case 24: - exprDollar = exprS[exprpt-5 : exprpt+1] - { - exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[2].Selector), exprDollar[5].duration, exprDollar[3].UnwrapExpr, nil) - } - case 25: - exprDollar = exprS[exprpt-6 : exprpt+1] - { - exprVAL.LogRangeExpr = newLogRange(newMatcherExpr(exprDollar[2].Selector), exprDollar[5].duration, exprDollar[3].UnwrapExpr, exprDollar[6].OffsetExpr) - } - case 26: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].PipelineExpr), exprDollar[3].duration, nil, nil) - } - case 27: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].PipelineExpr), exprDollar[3].duration, nil, exprDollar[4].OffsetExpr) - } - case 28: - exprDollar = exprS[exprpt-5 : exprpt+1] - { - exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[2].Selector), exprDollar[3].PipelineExpr), exprDollar[5].duration, nil, nil) - } - case 29: - exprDollar = exprS[exprpt-6 : exprpt+1] - { - exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[2].Selector), exprDollar[3].PipelineExpr), exprDollar[5].duration, nil, exprDollar[6].OffsetExpr) - } - case 30: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].PipelineExpr), exprDollar[4].duration, exprDollar[3].UnwrapExpr, nil) - } - case 31: - exprDollar = exprS[exprpt-5 : exprpt+1] - { - exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), exprDollar[2].PipelineExpr), exprDollar[4].duration, exprDollar[3].UnwrapExpr, exprDollar[5].OffsetExpr) - } - case 32: - exprDollar = exprS[exprpt-6 : exprpt+1] - { - exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[2].Selector), exprDollar[3].PipelineExpr), exprDollar[6].duration, exprDollar[4].UnwrapExpr, nil) - } - case 33: - exprDollar = exprS[exprpt-7 : exprpt+1] - { - exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[2].Selector), exprDollar[3].PipelineExpr), exprDollar[6].duration, exprDollar[4].UnwrapExpr, exprDollar[7].OffsetExpr) - } - case 34: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), exprDollar[3].PipelineExpr), exprDollar[2].duration, nil, nil) - } - case 35: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), exprDollar[4].PipelineExpr), exprDollar[2].duration, nil, exprDollar[3].OffsetExpr) - } - case 36: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), exprDollar[3].PipelineExpr), exprDollar[2].duration, exprDollar[4].UnwrapExpr, nil) - } - case 37: - exprDollar = exprS[exprpt-5 : exprpt+1] - { - exprVAL.LogRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(exprDollar[1].Selector), 
exprDollar[4].PipelineExpr), exprDollar[2].duration, exprDollar[5].UnwrapExpr, exprDollar[3].OffsetExpr) - } - case 38: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.LogRangeExpr = exprDollar[2].LogRangeExpr - } - case 40: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.UnwrapExpr = newUnwrapExpr(exprDollar[3].str, "") - } - case 41: - exprDollar = exprS[exprpt-6 : exprpt+1] - { - exprVAL.UnwrapExpr = newUnwrapExpr(exprDollar[5].str, exprDollar[3].ConvOp) - } - case 42: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.UnwrapExpr = exprDollar[1].UnwrapExpr.addPostFilter(exprDollar[3].LabelFilter) - } - case 43: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.ConvOp = OpConvBytes - } - case 44: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.ConvOp = OpConvDuration - } - case 45: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.ConvOp = OpConvDurationSeconds - } - case 46: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.RangeAggregationExpr = newRangeAggregationExpr(exprDollar[3].LogRangeExpr, exprDollar[1].RangeOp, nil, nil) - } - case 47: - exprDollar = exprS[exprpt-6 : exprpt+1] - { - exprVAL.RangeAggregationExpr = newRangeAggregationExpr(exprDollar[5].LogRangeExpr, exprDollar[1].RangeOp, nil, &exprDollar[3].str) - } - case 48: - exprDollar = exprS[exprpt-5 : exprpt+1] - { - exprVAL.RangeAggregationExpr = newRangeAggregationExpr(exprDollar[3].LogRangeExpr, exprDollar[1].RangeOp, exprDollar[5].Grouping, nil) - } - case 49: - exprDollar = exprS[exprpt-7 : exprpt+1] - { - exprVAL.RangeAggregationExpr = newRangeAggregationExpr(exprDollar[5].LogRangeExpr, exprDollar[1].RangeOp, exprDollar[7].Grouping, &exprDollar[3].str) - } - case 50: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[3].MetricExpr, exprDollar[1].VectorOp, nil, nil) - } - case 51: - exprDollar = exprS[exprpt-5 : exprpt+1] - { - exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[4].MetricExpr, exprDollar[1].VectorOp, exprDollar[2].Grouping, nil) - } - case 52: - exprDollar = exprS[exprpt-5 : exprpt+1] - { - exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[3].MetricExpr, exprDollar[1].VectorOp, exprDollar[5].Grouping, nil) - } - case 53: - exprDollar = exprS[exprpt-6 : exprpt+1] - { - exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[5].MetricExpr, exprDollar[1].VectorOp, nil, &exprDollar[3].str) - } - case 54: - exprDollar = exprS[exprpt-7 : exprpt+1] - { - exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[5].MetricExpr, exprDollar[1].VectorOp, exprDollar[7].Grouping, &exprDollar[3].str) - } - case 55: - exprDollar = exprS[exprpt-7 : exprpt+1] - { - exprVAL.VectorAggregationExpr = mustNewVectorAggregationExpr(exprDollar[6].MetricExpr, exprDollar[1].VectorOp, exprDollar[2].Grouping, &exprDollar[4].str) - } - case 56: - exprDollar = exprS[exprpt-12 : exprpt+1] - { - exprVAL.LabelReplaceExpr = mustNewLabelReplaceExpr(exprDollar[3].MetricExpr, exprDollar[5].str, exprDollar[7].str, exprDollar[9].str, exprDollar[11].str) - } - case 57: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.Filter = log.LineMatchRegexp - } - case 58: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.Filter = log.LineMatchEqual - } - case 59: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.Filter = log.LineMatchPattern - } - case 60: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.Filter = log.LineMatchNotRegexp - } - 
case 61: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.Filter = log.LineMatchNotEqual - } - case 62: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.Filter = log.LineMatchNotPattern - } - case 63: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.Selector = exprDollar[2].Matchers - } - case 64: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.Selector = exprDollar[2].Matchers - } - case 65: - exprDollar = exprS[exprpt-2 : exprpt+1] - { - } - case 66: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.Matchers = []*labels.Matcher{exprDollar[1].Matcher} - } - case 67: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.Matchers = append(exprDollar[1].Matchers, exprDollar[3].Matcher) - } - case 68: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.Matcher = mustNewMatcher(labels.MatchEqual, exprDollar[1].str, exprDollar[3].str) - } - case 69: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.Matcher = mustNewMatcher(labels.MatchNotEqual, exprDollar[1].str, exprDollar[3].str) - } - case 70: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.Matcher = mustNewMatcher(labels.MatchRegexp, exprDollar[1].str, exprDollar[3].str) - } - case 71: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.Matcher = mustNewMatcher(labels.MatchNotRegexp, exprDollar[1].str, exprDollar[3].str) - } - case 72: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.PipelineExpr = MultiStageExpr{exprDollar[1].PipelineStage} - } - case 73: - exprDollar = exprS[exprpt-2 : exprpt+1] - { - exprVAL.PipelineExpr = append(exprDollar[1].PipelineExpr, exprDollar[2].PipelineStage) - } - case 74: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.PipelineStage = exprDollar[1].LineFilters - } - case 75: - exprDollar = exprS[exprpt-2 : exprpt+1] - { - exprVAL.PipelineStage = exprDollar[2].LogfmtParser - } - case 76: - exprDollar = exprS[exprpt-2 : exprpt+1] - { - exprVAL.PipelineStage = exprDollar[2].LabelParser - } - case 77: - exprDollar = exprS[exprpt-2 : exprpt+1] - { - exprVAL.PipelineStage = exprDollar[2].JSONExpressionParser - } - case 78: - exprDollar = exprS[exprpt-2 : exprpt+1] - { - exprVAL.PipelineStage = exprDollar[2].LogfmtExpressionParser - } - case 79: - exprDollar = exprS[exprpt-2 : exprpt+1] - { - exprVAL.PipelineStage = &LabelFilterExpr{LabelFilterer: exprDollar[2].LabelFilter} - } - case 80: - exprDollar = exprS[exprpt-2 : exprpt+1] - { - exprVAL.PipelineStage = exprDollar[2].LineFormatExpr - } - case 81: - exprDollar = exprS[exprpt-2 : exprpt+1] - { - exprVAL.PipelineStage = exprDollar[2].DecolorizeExpr - } - case 82: - exprDollar = exprS[exprpt-2 : exprpt+1] - { - exprVAL.PipelineStage = exprDollar[2].LabelFormatExpr - } - case 83: - exprDollar = exprS[exprpt-2 : exprpt+1] - { - exprVAL.PipelineStage = exprDollar[2].DropLabelsExpr - } - case 84: - exprDollar = exprS[exprpt-2 : exprpt+1] - { - exprVAL.PipelineStage = exprDollar[2].KeepLabelsExpr - } - case 85: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.FilterOp = OpFilterIP - } - case 86: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.OrFilter = newLineFilterExpr(log.LineMatchEqual, "", exprDollar[1].str) - } - case 87: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.OrFilter = newLineFilterExpr(log.LineMatchEqual, exprDollar[1].FilterOp, exprDollar[3].str) - } - case 88: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.OrFilter = newOrLineFilter(newLineFilterExpr(log.LineMatchEqual, "", exprDollar[1].str), exprDollar[3].OrFilter) - } - case 89: - exprDollar 
= exprS[exprpt-2 : exprpt+1] - { - exprVAL.LineFilter = newLineFilterExpr(exprDollar[1].Filter, "", exprDollar[2].str) - } - case 90: - exprDollar = exprS[exprpt-5 : exprpt+1] - { - exprVAL.LineFilter = newLineFilterExpr(exprDollar[1].Filter, exprDollar[2].FilterOp, exprDollar[4].str) - } - case 91: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.LineFilter = newOrLineFilter(newLineFilterExpr(exprDollar[1].Filter, "", exprDollar[2].str), exprDollar[4].OrFilter) - } - case 92: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.LineFilters = exprDollar[1].LineFilter - } - case 93: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.LineFilters = newOrLineFilter(exprDollar[1].LineFilter, exprDollar[3].OrFilter) - } - case 94: - exprDollar = exprS[exprpt-2 : exprpt+1] - { - exprVAL.LineFilters = newNestedLineFilterExpr(exprDollar[1].LineFilters, exprDollar[2].LineFilter) - } - case 95: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.ParserFlags = []string{exprDollar[1].str} - } - case 96: - exprDollar = exprS[exprpt-2 : exprpt+1] - { - exprVAL.ParserFlags = append(exprDollar[1].ParserFlags, exprDollar[2].str) - } - case 97: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.LogfmtParser = newLogfmtParserExpr(nil) - } - case 98: - exprDollar = exprS[exprpt-2 : exprpt+1] - { - exprVAL.LogfmtParser = newLogfmtParserExpr(exprDollar[2].ParserFlags) - } - case 99: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.LabelParser = newLabelParserExpr(OpParserTypeJSON, "") - } - case 100: - exprDollar = exprS[exprpt-2 : exprpt+1] - { - exprVAL.LabelParser = newLabelParserExpr(OpParserTypeRegexp, exprDollar[2].str) - } - case 101: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.LabelParser = newLabelParserExpr(OpParserTypeUnpack, "") - } - case 102: - exprDollar = exprS[exprpt-2 : exprpt+1] - { - exprVAL.LabelParser = newLabelParserExpr(OpParserTypePattern, exprDollar[2].str) - } - case 103: - exprDollar = exprS[exprpt-2 : exprpt+1] - { - exprVAL.JSONExpressionParser = newJSONExpressionParser(exprDollar[2].LabelExtractionExpressionList) - } - case 104: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.LogfmtExpressionParser = newLogfmtExpressionParser(exprDollar[3].LabelExtractionExpressionList, exprDollar[2].ParserFlags) - } - case 105: - exprDollar = exprS[exprpt-2 : exprpt+1] - { - exprVAL.LogfmtExpressionParser = newLogfmtExpressionParser(exprDollar[2].LabelExtractionExpressionList, nil) - } - case 106: - exprDollar = exprS[exprpt-2 : exprpt+1] - { - exprVAL.LineFormatExpr = newLineFmtExpr(exprDollar[2].str) - } - case 107: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.DecolorizeExpr = newDecolorizeExpr() - } - case 108: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.LabelFormat = log.NewRenameLabelFmt(exprDollar[1].str, exprDollar[3].str) - } - case 109: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.LabelFormat = log.NewTemplateLabelFmt(exprDollar[1].str, exprDollar[3].str) - } - case 110: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.LabelsFormat = []log.LabelFmt{exprDollar[1].LabelFormat} - } - case 111: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.LabelsFormat = append(exprDollar[1].LabelsFormat, exprDollar[3].LabelFormat) - } - case 113: - exprDollar = exprS[exprpt-2 : exprpt+1] - { - exprVAL.LabelFormatExpr = newLabelFmtExpr(exprDollar[2].LabelsFormat) - } - case 114: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.LabelFilter = log.NewStringLabelFilter(exprDollar[1].Matcher) - } - case 
115: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.LabelFilter = exprDollar[1].IPLabelFilter - } - case 116: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.LabelFilter = exprDollar[1].UnitFilter - } - case 117: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.LabelFilter = exprDollar[1].NumberFilter - } - case 118: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.LabelFilter = exprDollar[2].LabelFilter - } - case 119: - exprDollar = exprS[exprpt-2 : exprpt+1] - { - exprVAL.LabelFilter = log.NewAndLabelFilter(exprDollar[1].LabelFilter, exprDollar[2].LabelFilter) - } - case 120: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.LabelFilter = log.NewAndLabelFilter(exprDollar[1].LabelFilter, exprDollar[3].LabelFilter) - } - case 121: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.LabelFilter = log.NewAndLabelFilter(exprDollar[1].LabelFilter, exprDollar[3].LabelFilter) - } - case 122: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.LabelFilter = log.NewOrLabelFilter(exprDollar[1].LabelFilter, exprDollar[3].LabelFilter) - } - case 123: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.LabelExtractionExpression = log.NewLabelExtractionExpr(exprDollar[1].str, exprDollar[3].str) - } - case 124: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.LabelExtractionExpression = log.NewLabelExtractionExpr(exprDollar[1].str, exprDollar[1].str) - } - case 125: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.LabelExtractionExpressionList = []log.LabelExtractionExpr{exprDollar[1].LabelExtractionExpression} - } - case 126: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.LabelExtractionExpressionList = append(exprDollar[1].LabelExtractionExpressionList, exprDollar[3].LabelExtractionExpression) - } - case 127: - exprDollar = exprS[exprpt-6 : exprpt+1] - { - exprVAL.IPLabelFilter = log.NewIPLabelFilter(exprDollar[5].str, exprDollar[1].str, log.LabelFilterEqual) - } - case 128: - exprDollar = exprS[exprpt-6 : exprpt+1] - { - exprVAL.IPLabelFilter = log.NewIPLabelFilter(exprDollar[5].str, exprDollar[1].str, log.LabelFilterNotEqual) - } - case 129: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.UnitFilter = exprDollar[1].DurationFilter - } - case 130: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.UnitFilter = exprDollar[1].BytesFilter - } - case 131: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.DurationFilter = log.NewDurationLabelFilter(log.LabelFilterGreaterThan, exprDollar[1].str, exprDollar[3].duration) - } - case 132: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.DurationFilter = log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, exprDollar[1].str, exprDollar[3].duration) - } - case 133: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.DurationFilter = log.NewDurationLabelFilter(log.LabelFilterLesserThan, exprDollar[1].str, exprDollar[3].duration) - } - case 134: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.DurationFilter = log.NewDurationLabelFilter(log.LabelFilterLesserThanOrEqual, exprDollar[1].str, exprDollar[3].duration) - } - case 135: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.DurationFilter = log.NewDurationLabelFilter(log.LabelFilterNotEqual, exprDollar[1].str, exprDollar[3].duration) - } - case 136: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.DurationFilter = log.NewDurationLabelFilter(log.LabelFilterEqual, exprDollar[1].str, exprDollar[3].duration) - } - case 137: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - 
exprVAL.DurationFilter = log.NewDurationLabelFilter(log.LabelFilterEqual, exprDollar[1].str, exprDollar[3].duration) - } - case 138: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.BytesFilter = log.NewBytesLabelFilter(log.LabelFilterGreaterThan, exprDollar[1].str, exprDollar[3].bytes) - } - case 139: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.BytesFilter = log.NewBytesLabelFilter(log.LabelFilterGreaterThanOrEqual, exprDollar[1].str, exprDollar[3].bytes) - } - case 140: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.BytesFilter = log.NewBytesLabelFilter(log.LabelFilterLesserThan, exprDollar[1].str, exprDollar[3].bytes) - } - case 141: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.BytesFilter = log.NewBytesLabelFilter(log.LabelFilterLesserThanOrEqual, exprDollar[1].str, exprDollar[3].bytes) - } - case 142: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.BytesFilter = log.NewBytesLabelFilter(log.LabelFilterNotEqual, exprDollar[1].str, exprDollar[3].bytes) - } - case 143: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.BytesFilter = log.NewBytesLabelFilter(log.LabelFilterEqual, exprDollar[1].str, exprDollar[3].bytes) - } - case 144: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.BytesFilter = log.NewBytesLabelFilter(log.LabelFilterEqual, exprDollar[1].str, exprDollar[3].bytes) - } - case 145: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.NumberFilter = log.NewNumericLabelFilter(log.LabelFilterGreaterThan, exprDollar[1].str, exprDollar[3].LiteralExpr.Val) - } - case 146: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.NumberFilter = log.NewNumericLabelFilter(log.LabelFilterGreaterThanOrEqual, exprDollar[1].str, exprDollar[3].LiteralExpr.Val) - } - case 147: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.NumberFilter = log.NewNumericLabelFilter(log.LabelFilterLesserThan, exprDollar[1].str, exprDollar[3].LiteralExpr.Val) - } - case 148: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.NumberFilter = log.NewNumericLabelFilter(log.LabelFilterLesserThanOrEqual, exprDollar[1].str, exprDollar[3].LiteralExpr.Val) - } - case 149: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.NumberFilter = log.NewNumericLabelFilter(log.LabelFilterNotEqual, exprDollar[1].str, exprDollar[3].LiteralExpr.Val) - } - case 150: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.NumberFilter = log.NewNumericLabelFilter(log.LabelFilterEqual, exprDollar[1].str, exprDollar[3].LiteralExpr.Val) - } - case 151: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.NumberFilter = log.NewNumericLabelFilter(log.LabelFilterEqual, exprDollar[1].str, exprDollar[3].LiteralExpr.Val) - } - case 152: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.DropLabel = log.NewDropLabel(nil, exprDollar[1].str) - } - case 153: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.DropLabel = log.NewDropLabel(exprDollar[1].Matcher, "") - } - case 154: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.DropLabels = []log.DropLabel{exprDollar[1].DropLabel} - } - case 155: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.DropLabels = append(exprDollar[1].DropLabels, exprDollar[3].DropLabel) - } - case 156: - exprDollar = exprS[exprpt-2 : exprpt+1] - { - exprVAL.DropLabelsExpr = newDropLabelsExpr(exprDollar[2].DropLabels) - } - case 157: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.KeepLabel = log.NewKeepLabel(nil, exprDollar[1].str) - } - case 158: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - 
exprVAL.KeepLabel = log.NewKeepLabel(exprDollar[1].Matcher, "") - } - case 159: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.KeepLabels = []log.KeepLabel{exprDollar[1].KeepLabel} - } - case 160: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.KeepLabels = append(exprDollar[1].KeepLabels, exprDollar[3].KeepLabel) - } - case 161: - exprDollar = exprS[exprpt-2 : exprpt+1] - { - exprVAL.KeepLabelsExpr = newKeepLabelsExpr(exprDollar[2].KeepLabels) - } - case 162: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.BinOpExpr = mustNewBinOpExpr("or", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) - } - case 163: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.BinOpExpr = mustNewBinOpExpr("and", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) - } - case 164: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.BinOpExpr = mustNewBinOpExpr("unless", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) - } - case 165: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.BinOpExpr = mustNewBinOpExpr("+", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) - } - case 166: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.BinOpExpr = mustNewBinOpExpr("-", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) - } - case 167: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.BinOpExpr = mustNewBinOpExpr("*", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) - } - case 168: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.BinOpExpr = mustNewBinOpExpr("/", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) - } - case 169: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.BinOpExpr = mustNewBinOpExpr("%", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) - } - case 170: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.BinOpExpr = mustNewBinOpExpr("^", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) - } - case 171: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.BinOpExpr = mustNewBinOpExpr("==", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) - } - case 172: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.BinOpExpr = mustNewBinOpExpr("!=", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) - } - case 173: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.BinOpExpr = mustNewBinOpExpr(">", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) - } - case 174: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.BinOpExpr = mustNewBinOpExpr(">=", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) - } - case 175: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.BinOpExpr = mustNewBinOpExpr("<", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) - } - case 176: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.BinOpExpr = mustNewBinOpExpr("<=", exprDollar[3].BinOpModifier, exprDollar[1].Expr, exprDollar[4].Expr) - } - case 177: - exprDollar = exprS[exprpt-0 : exprpt+1] - { - exprVAL.BoolModifier = &BinOpOptions{VectorMatching: &VectorMatching{Card: CardOneToOne}} - } - case 178: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.BoolModifier = &BinOpOptions{VectorMatching: &VectorMatching{Card: CardOneToOne}, ReturnBool: true} - } - case 179: - exprDollar = exprS[exprpt-5 : exprpt+1] - { - exprVAL.OnOrIgnoringModifier = exprDollar[1].BoolModifier - 
exprVAL.OnOrIgnoringModifier.VectorMatching.On = true - exprVAL.OnOrIgnoringModifier.VectorMatching.MatchingLabels = exprDollar[4].Labels - } - case 180: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.OnOrIgnoringModifier = exprDollar[1].BoolModifier - exprVAL.OnOrIgnoringModifier.VectorMatching.On = true - } - case 181: - exprDollar = exprS[exprpt-5 : exprpt+1] - { - exprVAL.OnOrIgnoringModifier = exprDollar[1].BoolModifier - exprVAL.OnOrIgnoringModifier.VectorMatching.MatchingLabels = exprDollar[4].Labels - } - case 182: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.OnOrIgnoringModifier = exprDollar[1].BoolModifier - } - case 183: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.BinOpModifier = exprDollar[1].BoolModifier - } - case 184: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.BinOpModifier = exprDollar[1].OnOrIgnoringModifier - } - case 185: - exprDollar = exprS[exprpt-2 : exprpt+1] - { - exprVAL.BinOpModifier = exprDollar[1].OnOrIgnoringModifier - exprVAL.BinOpModifier.VectorMatching.Card = CardManyToOne - } - case 186: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.BinOpModifier = exprDollar[1].OnOrIgnoringModifier - exprVAL.BinOpModifier.VectorMatching.Card = CardManyToOne - } - case 187: - exprDollar = exprS[exprpt-5 : exprpt+1] - { - exprVAL.BinOpModifier = exprDollar[1].OnOrIgnoringModifier - exprVAL.BinOpModifier.VectorMatching.Card = CardManyToOne - exprVAL.BinOpModifier.VectorMatching.Include = exprDollar[4].Labels - } - case 188: - exprDollar = exprS[exprpt-2 : exprpt+1] - { - exprVAL.BinOpModifier = exprDollar[1].OnOrIgnoringModifier - exprVAL.BinOpModifier.VectorMatching.Card = CardOneToMany - } - case 189: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.BinOpModifier = exprDollar[1].OnOrIgnoringModifier - exprVAL.BinOpModifier.VectorMatching.Card = CardOneToMany - } - case 190: - exprDollar = exprS[exprpt-5 : exprpt+1] - { - exprVAL.BinOpModifier = exprDollar[1].OnOrIgnoringModifier - exprVAL.BinOpModifier.VectorMatching.Card = CardOneToMany - exprVAL.BinOpModifier.VectorMatching.Include = exprDollar[4].Labels - } - case 191: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.LiteralExpr = mustNewLiteralExpr(exprDollar[1].str, false) - } - case 192: - exprDollar = exprS[exprpt-2 : exprpt+1] - { - exprVAL.LiteralExpr = mustNewLiteralExpr(exprDollar[2].str, false) - } - case 193: - exprDollar = exprS[exprpt-2 : exprpt+1] - { - exprVAL.LiteralExpr = mustNewLiteralExpr(exprDollar[2].str, true) - } - case 194: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.VectorExpr = NewVectorExpr(exprDollar[3].str) - } - case 195: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.Vector = OpTypeVector - } - case 196: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.VectorOp = OpTypeSum - } - case 197: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.VectorOp = OpTypeAvg - } - case 198: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.VectorOp = OpTypeCount - } - case 199: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.VectorOp = OpTypeMax - } - case 200: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.VectorOp = OpTypeMin - } - case 201: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.VectorOp = OpTypeStddev - } - case 202: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.VectorOp = OpTypeStdvar - } - case 203: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.VectorOp = OpTypeBottomK - } - case 204: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - 
exprVAL.VectorOp = OpTypeTopK - } - case 205: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.VectorOp = OpTypeSort - } - case 206: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.VectorOp = OpTypeSortDesc - } - case 207: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.VectorOp = OpTypeApproxTopK - } - case 208: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.RangeOp = OpRangeTypeCount - } - case 209: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.RangeOp = OpRangeTypeRate - } - case 210: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.RangeOp = OpRangeTypeRateCounter - } - case 211: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.RangeOp = OpRangeTypeBytes - } - case 212: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.RangeOp = OpRangeTypeBytesRate - } - case 213: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.RangeOp = OpRangeTypeAvg - } - case 214: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.RangeOp = OpRangeTypeSum - } - case 215: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.RangeOp = OpRangeTypeMin - } - case 216: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.RangeOp = OpRangeTypeMax - } - case 217: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.RangeOp = OpRangeTypeStdvar - } - case 218: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.RangeOp = OpRangeTypeStddev - } - case 219: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.RangeOp = OpRangeTypeQuantile - } - case 220: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.RangeOp = OpRangeTypeFirst - } - case 221: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.RangeOp = OpRangeTypeLast - } - case 222: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.RangeOp = OpRangeTypeAbsent - } - case 223: - exprDollar = exprS[exprpt-2 : exprpt+1] - { - exprVAL.OffsetExpr = newOffsetExpr(exprDollar[2].duration) - } - case 224: - exprDollar = exprS[exprpt-1 : exprpt+1] - { - exprVAL.Labels = []string{exprDollar[1].str} - } - case 225: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.Labels = append(exprDollar[1].Labels, exprDollar[3].str) - } - case 226: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.Grouping = &Grouping{Without: false, Groups: exprDollar[3].Labels} - } - case 227: - exprDollar = exprS[exprpt-4 : exprpt+1] - { - exprVAL.Grouping = &Grouping{Without: true, Groups: exprDollar[3].Labels} - } - case 228: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.Grouping = &Grouping{Without: false, Groups: nil} - } - case 229: - exprDollar = exprS[exprpt-3 : exprpt+1] - { - exprVAL.Grouping = &Grouping{Without: true, Groups: nil} - } - } - goto exprstack /* stack new state and value */ -} diff --git a/pkg/logql/syntax/lex.go b/pkg/logql/syntax/lex.go index 4175682efa5cf..df0d1c324185c 100644 --- a/pkg/logql/syntax/lex.go +++ b/pkg/logql/syntax/lex.go @@ -138,7 +138,7 @@ type lexer struct { builder strings.Builder } -func (l *lexer) Lex(lval *exprSymType) int { +func (l *lexer) Lex(lval *syntaxSymType) int { r := l.Scan() switch r { @@ -158,7 +158,7 @@ func (l *lexer) Lex(lval *exprSymType) int { duration, ok := tryScanDuration(numberText, &l.Scanner) if ok { - lval.duration = duration + lval.dur = duration return DURATION } @@ -174,13 +174,13 @@ func (l *lexer) Lex(lval *exprSymType) int { if l.Peek() == '-' { if flag, ok := tryScanFlag(&l.Scanner); ok { lval.str = flag - return PARSER_FLAG + return FUNCTION_FLAG } } tokenText := l.TokenText() if duration, ok := 
tryScanDuration(tokenText, &l.Scanner); ok { - lval.duration = duration + lval.dur = duration return DURATION } @@ -209,7 +209,7 @@ func (l *lexer) Lex(lval *exprSymType) int { l.Error(err.Error()) return 0 } - lval.duration = time.Duration(i) + lval.dur = time.Duration(i) return RANGE } _, _ = l.builder.WriteRune(r) diff --git a/pkg/logql/syntax/lex_test.go b/pkg/logql/syntax/lex_test.go index 0e9ad2aac3338..015d0cc146cf2 100644 --- a/pkg/logql/syntax/lex_test.go +++ b/pkg/logql/syntax/lex_test.go @@ -77,13 +77,13 @@ func TestLex(t *testing.T) { {`{foo="bar"} #|~ "\\w+"`, []int{OPEN_BRACE, IDENTIFIER, EQ, STRING, CLOSE_BRACE}}, {`#{foo="bar"} |~ "\\w+"`, []int{}}, {`{foo="#"}`, []int{OPEN_BRACE, IDENTIFIER, EQ, STRING, CLOSE_BRACE}}, - {`{foo="bar"}|logfmt --strict"`, []int{OPEN_BRACE, IDENTIFIER, EQ, STRING, CLOSE_BRACE, PIPE, LOGFMT, PARSER_FLAG}}, - {`{foo="bar"}|LOGFMT --strict"`, []int{OPEN_BRACE, IDENTIFIER, EQ, STRING, CLOSE_BRACE, PIPE, LOGFMT, PARSER_FLAG}}, + {`{foo="bar"}|logfmt --strict"`, []int{OPEN_BRACE, IDENTIFIER, EQ, STRING, CLOSE_BRACE, PIPE, LOGFMT, FUNCTION_FLAG}}, + {`{foo="bar"}|LOGFMT --strict"`, []int{OPEN_BRACE, IDENTIFIER, EQ, STRING, CLOSE_BRACE, PIPE, LOGFMT, FUNCTION_FLAG}}, {`{foo="bar"}|logfmt|ip="b"`, []int{OPEN_BRACE, IDENTIFIER, EQ, STRING, CLOSE_BRACE, PIPE, LOGFMT, PIPE, IDENTIFIER, EQ, STRING}}, {`{foo="bar"}|logfmt|rate="b"`, []int{OPEN_BRACE, IDENTIFIER, EQ, STRING, CLOSE_BRACE, PIPE, LOGFMT, PIPE, IDENTIFIER, EQ, STRING}}, {`{foo="bar"}|logfmt|b=ip("b")`, []int{OPEN_BRACE, IDENTIFIER, EQ, STRING, CLOSE_BRACE, PIPE, LOGFMT, PIPE, IDENTIFIER, EQ, IP, OPEN_PARENTHESIS, STRING, CLOSE_PARENTHESIS}}, {`{foo="bar"}|logfmt|=ip("b")`, []int{OPEN_BRACE, IDENTIFIER, EQ, STRING, CLOSE_BRACE, PIPE, LOGFMT, PIPE_EXACT, IP, OPEN_PARENTHESIS, STRING, CLOSE_PARENTHESIS}}, - {`{foo="bar"}|logfmt --strict --keep-empty|=ip("b")`, []int{OPEN_BRACE, IDENTIFIER, EQ, STRING, CLOSE_BRACE, PIPE, LOGFMT, PARSER_FLAG, PARSER_FLAG, PIPE_EXACT, IP, OPEN_PARENTHESIS, STRING, CLOSE_PARENTHESIS}}, + {`{foo="bar"}|logfmt --strict --keep-empty|=ip("b")`, []int{OPEN_BRACE, IDENTIFIER, EQ, STRING, CLOSE_BRACE, PIPE, LOGFMT, FUNCTION_FLAG, FUNCTION_FLAG, PIPE_EXACT, IP, OPEN_PARENTHESIS, STRING, CLOSE_PARENTHESIS}}, {`ip`, []int{IDENTIFIER}}, {`rate`, []int{IDENTIFIER}}, {`{foo="bar"} | json | baz="#"`, []int{OPEN_BRACE, IDENTIFIER, EQ, STRING, CLOSE_BRACE, PIPE, JSON, PIPE, IDENTIFIER, EQ, STRING}}, @@ -92,8 +92,8 @@ func TestLex(t *testing.T) { | json`, []int{OPEN_BRACE, IDENTIFIER, EQ, STRING, CLOSE_BRACE, PIPE, JSON}}, {`{foo="bar"} | json code="response.code", param="request.params[0]"`, []int{OPEN_BRACE, IDENTIFIER, EQ, STRING, CLOSE_BRACE, PIPE, JSON, IDENTIFIER, EQ, STRING, COMMA, IDENTIFIER, EQ, STRING}}, {`{foo="bar"} | logfmt code="response.code", IPAddress="host"`, []int{OPEN_BRACE, IDENTIFIER, EQ, STRING, CLOSE_BRACE, PIPE, LOGFMT, IDENTIFIER, EQ, STRING, COMMA, IDENTIFIER, EQ, STRING}}, - {`{foo="bar"} | logfmt --strict code"`, []int{OPEN_BRACE, IDENTIFIER, EQ, STRING, CLOSE_BRACE, PIPE, LOGFMT, PARSER_FLAG, IDENTIFIER}}, - {`{foo="bar"} | logfmt --keep-empty --strict code="response.code", IPAddress="host"`, []int{OPEN_BRACE, IDENTIFIER, EQ, STRING, CLOSE_BRACE, PIPE, LOGFMT, PARSER_FLAG, PARSER_FLAG, IDENTIFIER, EQ, STRING, COMMA, IDENTIFIER, EQ, STRING}}, + {`{foo="bar"} | logfmt --strict code"`, []int{OPEN_BRACE, IDENTIFIER, EQ, STRING, CLOSE_BRACE, PIPE, LOGFMT, FUNCTION_FLAG, IDENTIFIER}}, + {`{foo="bar"} | logfmt --keep-empty --strict code="response.code", 
IPAddress="host"`, []int{OPEN_BRACE, IDENTIFIER, EQ, STRING, CLOSE_BRACE, PIPE, LOGFMT, FUNCTION_FLAG, FUNCTION_FLAG, IDENTIFIER, EQ, STRING, COMMA, IDENTIFIER, EQ, STRING}}, {`decolorize`, []int{DECOLORIZE}}, {`123`, []int{NUMBER}}, {`-123`, []int{SUB, NUMBER}}, @@ -121,7 +121,7 @@ func TestLex(t *testing.T) { }, } l.Init(strings.NewReader(tc.input)) - var lval exprSymType + var lval syntaxSymType for { tok := l.Lex(&lval) if tok == 0 { diff --git a/pkg/logql/syntax/parser.go b/pkg/logql/syntax/parser.go index 524c86109afb4..95ef55dcfa5dd 100644 --- a/pkg/logql/syntax/parser.go +++ b/pkg/logql/syntax/parser.go @@ -23,7 +23,7 @@ const ( var parserPool = sync.Pool{ New: func() interface{} { p := &parser{ - p: &exprParserImpl{}, + p: &syntaxParserImpl{}, Reader: strings.NewReader(""), lexer: &lexer{}, } @@ -41,16 +41,16 @@ const maxInputSize = 131072 func init() { // Improve the error messages coming out of yacc. - exprErrorVerbose = true + syntaxErrorVerbose = true // uncomment when you need to understand yacc rule tree. // exprDebug = 3 for str, tok := range tokens { - exprToknames[tok-exprPrivate+1] = str + syntaxToknames[tok-syntaxPrivate+1] = str } } type parser struct { - p *exprParserImpl + p *syntaxParserImpl *lexer expr Expr *strings.Reader diff --git a/pkg/logql/syntax/parser_test.go b/pkg/logql/syntax/parser_test.go index 7f257cd3c25d1..09a829c5c4006 100644 --- a/pkg/logql/syntax/parser_test.go +++ b/pkg/logql/syntax/parser_test.go @@ -26,7 +26,7 @@ var ParseTestCases = []struct { in: "count_over_time({foo=~`bar\\w+`}[12h] |~ `error\\`)", exp: &RangeAggregationExpr{ Operation: "count_over_time", - Left: &LogRange{ + Left: &LogRangeExpr{ Left: &PipelineExpr{ MultiStages: MultiStageExpr{ newLineFilterExpr(log.LineMatchRegexp, "", "error\\"), @@ -55,7 +55,7 @@ var ParseTestCases = []struct { in: `count_over_time({foo="bar"}[12h] |= "error")`, exp: &RangeAggregationExpr{ Operation: "count_over_time", - Left: &LogRange{ + Left: &LogRangeExpr{ Left: newPipelineExpr( newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "foo", Value: "bar"}}), MultiStageExpr{ @@ -71,7 +71,7 @@ var ParseTestCases = []struct { in: `count_over_time({foo="bar"} |= "error" [12h])`, exp: &RangeAggregationExpr{ Operation: "count_over_time", - Left: &LogRange{ + Left: &LogRangeExpr{ Left: newPipelineExpr( newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "foo", Value: "bar"}}), MultiStageExpr{newLineFilterExpr(log.LineMatchEqual, "", "error")}, @@ -109,7 +109,7 @@ var ParseTestCases = []struct { { in: `count_over_time({ foo = "bar" }[12m])`, exp: &RangeAggregationExpr{ - Left: &LogRange{ + Left: &LogRangeExpr{ Left: &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}}, Interval: 12 * time.Minute, }, @@ -119,7 +119,7 @@ var ParseTestCases = []struct { { in: `bytes_over_time({ foo = "bar" }[12m])`, exp: &RangeAggregationExpr{ - Left: &LogRange{ + Left: &LogRangeExpr{ Left: &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}}, Interval: 12 * time.Minute, }, @@ -129,7 +129,7 @@ var ParseTestCases = []struct { { in: `bytes_rate({ foo = "bar" }[12m])`, exp: &RangeAggregationExpr{ - Left: &LogRange{ + Left: &LogRangeExpr{ Left: &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}}, Interval: 12 * time.Minute, }, @@ -139,7 +139,7 @@ var ParseTestCases = []struct { { in: `rate({ foo = "bar" }[5h])`, exp: &RangeAggregationExpr{ - Left: &LogRange{ + Left: &LogRangeExpr{ Left: &MatchersExpr{Mts: 
[]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}}, Interval: 5 * time.Hour, }, @@ -188,7 +188,7 @@ var ParseTestCases = []struct { { in: `rate({ foo = "bar" }[5d])`, exp: &RangeAggregationExpr{ - Left: &LogRange{ + Left: &LogRangeExpr{ Left: &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}}, Interval: 5 * 24 * time.Hour, }, @@ -198,7 +198,7 @@ var ParseTestCases = []struct { { in: `count_over_time({ foo = "bar" }[1w])`, exp: &RangeAggregationExpr{ - Left: &LogRange{ + Left: &LogRangeExpr{ Left: &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}}, Interval: 7 * 24 * time.Hour, }, @@ -208,7 +208,7 @@ var ParseTestCases = []struct { { in: `absent_over_time({ foo = "bar" }[1w])`, exp: &RangeAggregationExpr{ - Left: &LogRange{ + Left: &LogRangeExpr{ Left: &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}}, Interval: 7 * 24 * time.Hour, }, @@ -218,7 +218,7 @@ var ParseTestCases = []struct { { in: `sum(rate({ foo = "bar" }[5h]))`, exp: mustNewVectorAggregationExpr(&RangeAggregationExpr{ - Left: &LogRange{ + Left: &LogRangeExpr{ Left: &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}}, Interval: 5 * time.Hour, }, @@ -228,7 +228,7 @@ var ParseTestCases = []struct { { in: `sum(rate({ foo ="bar" }[1y]))`, exp: mustNewVectorAggregationExpr(&RangeAggregationExpr{ - Left: &LogRange{ + Left: &LogRangeExpr{ Left: &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}}, Interval: 365 * 24 * time.Hour, }, @@ -238,7 +238,7 @@ var ParseTestCases = []struct { { in: `avg(count_over_time({ foo = "bar" }[5h])) by (bar,foo)`, exp: mustNewVectorAggregationExpr(&RangeAggregationExpr{ - Left: &LogRange{ + Left: &LogRangeExpr{ Left: &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}}, Interval: 5 * time.Hour, }, @@ -261,7 +261,7 @@ var ParseTestCases = []struct { exp: mustNewVectorAggregationExpr( mustNewLabelReplaceExpr( &RangeAggregationExpr{ - Left: &LogRange{ + Left: &LogRangeExpr{ Left: &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}}, Interval: 5 * time.Hour, }, @@ -277,7 +277,7 @@ var ParseTestCases = []struct { { in: `avg(count_over_time({ foo = "bar" }[5h])) by ()`, exp: mustNewVectorAggregationExpr(&RangeAggregationExpr{ - Left: &LogRange{ + Left: &LogRangeExpr{ Left: &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}}, Interval: 5 * time.Hour, }, @@ -290,7 +290,7 @@ var ParseTestCases = []struct { { in: `max without (bar) (count_over_time({ foo = "bar" }[5h]))`, exp: mustNewVectorAggregationExpr(&RangeAggregationExpr{ - Left: &LogRange{ + Left: &LogRangeExpr{ Left: &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}}, Interval: 5 * time.Hour, }, @@ -303,7 +303,7 @@ var ParseTestCases = []struct { { in: `max without () (count_over_time({ foo = "bar" }[5h]))`, exp: mustNewVectorAggregationExpr(&RangeAggregationExpr{ - Left: &LogRange{ + Left: &LogRangeExpr{ Left: &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}}, Interval: 5 * time.Hour, }, @@ -316,7 +316,7 @@ var ParseTestCases = []struct { { in: `topk(10,count_over_time({ foo = "bar" }[5h])) without (bar)`, exp: mustNewVectorAggregationExpr(&RangeAggregationExpr{ - Left: &LogRange{ + Left: &LogRangeExpr{ Left: &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", 
"bar")}}, Interval: 5 * time.Hour, }, @@ -329,7 +329,7 @@ var ParseTestCases = []struct { { in: `bottomk(30 ,sum(rate({ foo = "bar" }[5h])) by (foo))`, exp: mustNewVectorAggregationExpr(mustNewVectorAggregationExpr(&RangeAggregationExpr{ - Left: &LogRange{ + Left: &LogRangeExpr{ Left: &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}}, Interval: 5 * time.Hour, }, @@ -343,7 +343,7 @@ var ParseTestCases = []struct { { in: `max( sum(count_over_time({ foo = "bar" }[5h])) without (foo,bar) ) by (foo)`, exp: mustNewVectorAggregationExpr(mustNewVectorAggregationExpr(&RangeAggregationExpr{ - Left: &LogRange{ + Left: &LogRangeExpr{ Left: &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}}, Interval: 5 * time.Hour, }, @@ -689,7 +689,7 @@ var ParseTestCases = []struct { { in: `count_over_time(({foo="bar"} |= "baz" |~ "blip" != "flip" !~ "flap")[5m])`, exp: newRangeAggregationExpr( - &LogRange{ + &LogRangeExpr{ Left: newPipelineExpr( newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}), MultiStageExpr{ @@ -711,7 +711,7 @@ var ParseTestCases = []struct { { in: `bytes_over_time(({foo="bar"} |= "baz" |~ "blip" != "flip" !~ "flap")[5m])`, exp: newRangeAggregationExpr( - &LogRange{ + &LogRangeExpr{ Left: newPipelineExpr( newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}), MultiStageExpr{ @@ -733,7 +733,7 @@ var ParseTestCases = []struct { { in: `bytes_over_time(({foo="bar"} |= "baz" |~ "blip" != "flip" !~ "flap" | unpack)[5m])`, exp: newRangeAggregationExpr( - &LogRange{ + &LogRangeExpr{ Left: newPipelineExpr( newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}), MultiStageExpr{ @@ -765,7 +765,7 @@ var ParseTestCases = []struct { `, exp: mustNewLabelReplaceExpr( newRangeAggregationExpr( - &LogRange{ + &LogRangeExpr{ Left: newPipelineExpr( newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}), MultiStageExpr{ @@ -792,7 +792,7 @@ var ParseTestCases = []struct { { in: `sum(count_over_time(({foo="bar"} |= "baz" |~ "blip" != "flip" !~ "flap")[5m])) by (foo)`, exp: mustNewVectorAggregationExpr(newRangeAggregationExpr( - &LogRange{ + &LogRangeExpr{ Left: newPipelineExpr( newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}), MultiStageExpr{ @@ -820,7 +820,7 @@ var ParseTestCases = []struct { { in: `sum(bytes_rate(({foo="bar"} |= "baz" |~ "blip" != "flip" !~ "flap")[5m])) by (foo)`, exp: mustNewVectorAggregationExpr(newRangeAggregationExpr( - &LogRange{ + &LogRangeExpr{ Left: newPipelineExpr( newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}), MultiStageExpr{ @@ -848,7 +848,7 @@ var ParseTestCases = []struct { { in: `topk(5,count_over_time(({foo="bar"} |= "baz" |~ "blip" != "flip" !~ "flap")[5m])) without (foo)`, exp: mustNewVectorAggregationExpr(newRangeAggregationExpr( - &LogRange{ + &LogRangeExpr{ Left: newPipelineExpr( newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}), MultiStageExpr{ @@ -878,7 +878,7 @@ var ParseTestCases = []struct { exp: mustNewVectorAggregationExpr( mustNewVectorAggregationExpr( newRangeAggregationExpr( - &LogRange{ + &LogRangeExpr{ Left: newPipelineExpr( newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}), MultiStageExpr{ @@ -909,7 +909,7 @@ var ParseTestCases = []struct { { in: `count_over_time({foo="bar"}[5m] |= "baz" |~ "blip" != "flip" !~ "flap")`, exp: 
newRangeAggregationExpr( - &LogRange{ + &LogRangeExpr{ Left: newPipelineExpr( newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}), MultiStageExpr{ @@ -931,7 +931,7 @@ var ParseTestCases = []struct { { in: `sum(count_over_time({foo="bar"}[5m] |= "baz" |~ "blip" != "flip" !~ "flap")) by (foo)`, exp: mustNewVectorAggregationExpr(newRangeAggregationExpr( - &LogRange{ + &LogRangeExpr{ Left: newPipelineExpr( newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}), MultiStageExpr{ @@ -959,7 +959,7 @@ var ParseTestCases = []struct { { in: `topk(5,count_over_time({foo="bar"}[5m] |= "baz" |~ "blip" != "flip" !~ "flap")) without (foo)`, exp: mustNewVectorAggregationExpr(newRangeAggregationExpr( - &LogRange{ + &LogRangeExpr{ Left: newPipelineExpr( newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}), MultiStageExpr{ @@ -989,7 +989,7 @@ var ParseTestCases = []struct { exp: mustNewVectorAggregationExpr( mustNewVectorAggregationExpr( newRangeAggregationExpr( - &LogRange{ + &LogRangeExpr{ Left: newPipelineExpr( newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}), MultiStageExpr{ @@ -1057,7 +1057,7 @@ var ParseTestCases = []struct { VectorMatching: &VectorMatching{Card: CardOneToOne}, }, mustNewVectorAggregationExpr(newRangeAggregationExpr( - &LogRange{ + &LogRangeExpr{ Left: &MatchersExpr{ Mts: []*labels.Matcher{ mustNewMatcher(labels.MatchEqual, "foo", "bar"), @@ -1073,7 +1073,7 @@ var ParseTestCases = []struct { nil, ), mustNewVectorAggregationExpr(newRangeAggregationExpr( - &LogRange{ + &LogRangeExpr{ Left: &MatchersExpr{ Mts: []*labels.Matcher{ mustNewMatcher(labels.MatchEqual, "foo", "bar"), @@ -1090,7 +1090,7 @@ var ParseTestCases = []struct { ), ), mustNewVectorAggregationExpr(newRangeAggregationExpr( - &LogRange{ + &LogRangeExpr{ Left: &MatchersExpr{ Mts: []*labels.Matcher{ mustNewMatcher(labels.MatchEqual, "foo", "bar"), @@ -1124,7 +1124,7 @@ var ParseTestCases = []struct { VectorMatching: &VectorMatching{Card: CardOneToOne}, }, mustNewVectorAggregationExpr(newRangeAggregationExpr( - &LogRange{ + &LogRangeExpr{ Left: &MatchersExpr{ Mts: []*labels.Matcher{ mustNewMatcher(labels.MatchEqual, "foo", "bar"), @@ -1140,7 +1140,7 @@ var ParseTestCases = []struct { nil, ), mustNewVectorAggregationExpr(newRangeAggregationExpr( - &LogRange{ + &LogRangeExpr{ Left: &MatchersExpr{ Mts: []*labels.Matcher{ mustNewMatcher(labels.MatchEqual, "foo", "bar"), @@ -1157,7 +1157,7 @@ var ParseTestCases = []struct { ), ), mustNewVectorAggregationExpr(newRangeAggregationExpr( - &LogRange{ + &LogRangeExpr{ Left: &MatchersExpr{ Mts: []*labels.Matcher{ mustNewMatcher(labels.MatchEqual, "foo", "bar"), @@ -1187,7 +1187,7 @@ var ParseTestCases = []struct { VectorMatching: &VectorMatching{Card: CardOneToOne}, }, mustNewVectorAggregationExpr(newRangeAggregationExpr( - &LogRange{ + &LogRangeExpr{ Left: &MatchersExpr{ Mts: []*labels.Matcher{ mustNewMatcher(labels.MatchEqual, "foo", "bar"), @@ -1208,7 +1208,7 @@ var ParseTestCases = []struct { VectorMatching: &VectorMatching{Card: CardOneToOne}, }, mustNewVectorAggregationExpr(newRangeAggregationExpr( - &LogRange{ + &LogRangeExpr{ Left: &MatchersExpr{ Mts: []*labels.Matcher{ mustNewMatcher(labels.MatchEqual, "foo", "bar"), @@ -1224,7 +1224,7 @@ var ParseTestCases = []struct { nil, ), mustNewVectorAggregationExpr(newRangeAggregationExpr( - &LogRange{ + &LogRangeExpr{ Left: &MatchersExpr{ Mts: []*labels.Matcher{ mustNewMatcher(labels.MatchEqual, "foo", "bar"), @@ 
-1254,7 +1254,7 @@ var ParseTestCases = []struct { VectorMatching: &VectorMatching{Card: CardOneToOne}, }, newRangeAggregationExpr( - &LogRange{ + &LogRangeExpr{ Left: newPipelineExpr( newMatcherExpr([]*labels.Matcher{ mustNewMatcher(labels.MatchEqual, "namespace", "tns"), @@ -1265,7 +1265,7 @@ var ParseTestCases = []struct { Interval: 5 * time.Minute, }, OpRangeTypeCount, nil, nil), newRangeAggregationExpr( - &LogRange{ + &LogRangeExpr{ Left: &MatchersExpr{ Mts: []*labels.Matcher{ mustNewMatcher(labels.MatchEqual, "namespace", "tns"), @@ -1288,7 +1288,7 @@ var ParseTestCases = []struct { VectorMatching: &VectorMatching{Card: CardOneToOne}, }, newRangeAggregationExpr( - &LogRange{ + &LogRangeExpr{ Left: newPipelineExpr( newMatcherExpr([]*labels.Matcher{ mustNewMatcher(labels.MatchEqual, "namespace", "tns"), @@ -1299,7 +1299,7 @@ var ParseTestCases = []struct { Interval: 5 * time.Minute, }, OpRangeTypeCount, nil, nil), newRangeAggregationExpr( - &LogRange{ + &LogRangeExpr{ Left: &MatchersExpr{ Mts: []*labels.Matcher{ mustNewMatcher(labels.MatchEqual, "namespace", "tns"), @@ -1320,7 +1320,7 @@ var ParseTestCases = []struct { }, mustNewVectorAggregationExpr( newRangeAggregationExpr( - &LogRange{ + &LogRangeExpr{ Left: &MatchersExpr{ Mts: []*labels.Matcher{ mustNewMatcher(labels.MatchEqual, "foo", "bar"), @@ -2577,7 +2577,7 @@ var ParseTestCases = []struct { exp: mustNewBinOpExpr(OpTypeGT, &BinOpOptions{ReturnBool: true, VectorMatching: &VectorMatching{Card: CardOneToMany, Include: []string{"app"}, On: true, MatchingLabels: nil}}, mustNewVectorAggregationExpr( newRangeAggregationExpr( - &LogRange{ + &LogRangeExpr{ Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}), Interval: 1 * time.Minute, }, @@ -2589,7 +2589,7 @@ var ParseTestCases = []struct { ), mustNewVectorAggregationExpr( newRangeAggregationExpr( - &LogRange{ + &LogRangeExpr{ Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}), Interval: 1 * time.Minute, }, @@ -2608,7 +2608,7 @@ var ParseTestCases = []struct { exp: mustNewBinOpExpr(OpTypeGT, &BinOpOptions{ReturnBool: true, VectorMatching: &VectorMatching{Card: CardOneToMany, Include: nil, On: true, MatchingLabels: nil}}, mustNewVectorAggregationExpr( newRangeAggregationExpr( - &LogRange{ + &LogRangeExpr{ Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}), Interval: 1 * time.Minute, }, @@ -2620,7 +2620,7 @@ var ParseTestCases = []struct { ), mustNewVectorAggregationExpr( newRangeAggregationExpr( - &LogRange{ + &LogRangeExpr{ Left: newMatcherExpr([]*labels.Matcher{{Type: labels.MatchEqual, Name: "app", Value: "foo"}}), Interval: 1 * time.Minute, }, @@ -2794,14 +2794,14 @@ var ParseTestCases = []struct { VectorMatching: &VectorMatching{Card: CardOneToOne}, }, SampleExpr: &RangeAggregationExpr{ - Left: &LogRange{ + Left: &LogRangeExpr{ Left: &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}}, Interval: 12 * time.Minute, }, Operation: "count_over_time", }, RHS: &RangeAggregationExpr{ - Left: &LogRange{ + Left: &LogRangeExpr{ Left: &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}}, Interval: 12 * time.Minute, }, @@ -2818,7 +2818,7 @@ var ParseTestCases = []struct { VectorMatching: &VectorMatching{Card: CardOneToOne}, }, SampleExpr: &RangeAggregationExpr{ - Left: &LogRange{ + Left: &LogRangeExpr{ Left: &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}}, 
Interval: 12 * time.Minute, }, @@ -2841,7 +2841,7 @@ var ParseTestCases = []struct { VectorMatching: &VectorMatching{}, }, SampleExpr: &RangeAggregationExpr{ - Left: &LogRange{ + Left: &LogRangeExpr{ Left: &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}}, Interval: 12 * time.Minute, }, @@ -2854,7 +2854,7 @@ var ParseTestCases = []struct { VectorMatching: &VectorMatching{Card: CardOneToOne}, }, SampleExpr: &RangeAggregationExpr{ - Left: &LogRange{ + Left: &LogRangeExpr{ Left: &MatchersExpr{Mts: []*labels.Matcher{mustNewMatcher(labels.MatchEqual, "foo", "bar")}}, Interval: 12 * time.Minute, }, @@ -3031,7 +3031,7 @@ var ParseTestCases = []struct { { in: `count_over_time({ foo ="bar" } | json layer7_something_specific="layer7_something_specific" [12m])`, exp: &RangeAggregationExpr{ - Left: &LogRange{ + Left: &LogRangeExpr{ Left: &PipelineExpr{ MultiStages: MultiStageExpr{ newJSONExpressionParser([]log.LabelExtractionExpr{ @@ -3057,7 +3057,7 @@ var ParseTestCases = []struct { VectorMatching: &VectorMatching{Card: CardOneToOne}, }, mustNewVectorAggregationExpr(newRangeAggregationExpr( - &LogRange{ + &LogRangeExpr{ Left: &MatchersExpr{ Mts: []*labels.Matcher{ mustNewMatcher(labels.MatchEqual, "foo", "bar"), @@ -3147,7 +3147,7 @@ var ParseTestCases = []struct { Left: newMatcherExpr([]*labels.Matcher{mustNewMatcher(labels.MatchEqual, "app", "foo")}), MultiStages: MultiStageExpr{ &LineFilterExpr{ - Left: newOrLineFilter( + Left: newOrLineFilterExpr( &LineFilterExpr{ LineFilter: LineFilter{ Ty: log.LineMatchEqual, @@ -3186,7 +3186,7 @@ var ParseTestCases = []struct { Ty: log.LineMatchEqual, Match: "foo", }, - Or: newOrLineFilter( + Or: newOrLineFilterExpr( &LineFilterExpr{ LineFilter: LineFilter{ Ty: log.LineMatchEqual, @@ -3216,7 +3216,7 @@ var ParseTestCases = []struct { Ty: log.LineMatchPattern, Match: "foo", }, - Or: newOrLineFilter( + Or: newOrLineFilterExpr( &LineFilterExpr{ LineFilter: LineFilter{ Ty: log.LineMatchPattern, diff --git a/pkg/logql/syntax/prettier.go b/pkg/logql/syntax/prettier.go index 1b407453858f7..78f6376a6e1e1 100644 --- a/pkg/logql/syntax/prettier.go +++ b/pkg/logql/syntax/prettier.go @@ -109,7 +109,7 @@ func (e *LogfmtParserExpr) Pretty(level int) string { // `| regexp` // `| pattern` // `| unpack` -func (e *LabelParserExpr) Pretty(level int) string { +func (e *LineParserExpr) Pretty(level int) string { return commonPrefixIndent(level, e) } @@ -142,12 +142,12 @@ func (e *LabelFmtExpr) Pretty(level int) string { } // e.g: | json label="expression", another="expression" -func (e *JSONExpressionParser) Pretty(level int) string { +func (e *JSONExpressionParserExpr) Pretty(level int) string { return commonPrefixIndent(level, e) } // e.g: | logfmt label="expression", another="expression" -func (e *LogfmtExpressionParser) Pretty(level int) string { +func (e *LogfmtExpressionParserExpr) Pretty(level int) string { return commonPrefixIndent(level, e) } @@ -168,7 +168,7 @@ func (e *UnwrapExpr) Pretty(level int) string { // e.g: `{foo="bar"}|logfmt[5m]` // TODO(kavi): Rename `LogRange` -> `LogRangeExpr` (to be consistent with other expressions?) 
-func (e *LogRange) Pretty(level int) string { +func (e *LogRangeExpr) Pretty(level int) string { s := e.Left.Pretty(level) if e.Unwrap != nil { diff --git a/pkg/logql/syntax/serialize.go b/pkg/logql/syntax/serialize.go index 4e4362683543e..d36aecd39174a 100644 --- a/pkg/logql/syntax/serialize.go +++ b/pkg/logql/syntax/serialize.go @@ -204,7 +204,7 @@ func (v *JSONSerializer) VisitRangeAggregation(e *RangeAggregationExpr) { v.Flush() } -func (v *JSONSerializer) VisitLogRange(e *LogRange) { +func (v *JSONSerializer) VisitLogRange(e *LogRangeExpr) { v.WriteObjectStart() v.WriteObjectField(IntervalNanos) @@ -306,17 +306,17 @@ func (v *JSONSerializer) VisitPipeline(e *PipelineExpr) { // Below are StageExpr visitors that we are skipping since a pipeline is // serialized as a string. -func (*JSONSerializer) VisitDecolorize(*DecolorizeExpr) {} -func (*JSONSerializer) VisitDropLabels(*DropLabelsExpr) {} -func (*JSONSerializer) VisitJSONExpressionParser(*JSONExpressionParser) {} -func (*JSONSerializer) VisitKeepLabel(*KeepLabelsExpr) {} -func (*JSONSerializer) VisitLabelFilter(*LabelFilterExpr) {} -func (*JSONSerializer) VisitLabelFmt(*LabelFmtExpr) {} -func (*JSONSerializer) VisitLabelParser(*LabelParserExpr) {} -func (*JSONSerializer) VisitLineFilter(*LineFilterExpr) {} -func (*JSONSerializer) VisitLineFmt(*LineFmtExpr) {} -func (*JSONSerializer) VisitLogfmtExpressionParser(*LogfmtExpressionParser) {} -func (*JSONSerializer) VisitLogfmtParser(*LogfmtParserExpr) {} +func (*JSONSerializer) VisitDecolorize(*DecolorizeExpr) {} +func (*JSONSerializer) VisitDropLabels(*DropLabelsExpr) {} +func (*JSONSerializer) VisitJSONExpressionParser(*JSONExpressionParserExpr) {} +func (*JSONSerializer) VisitKeepLabel(*KeepLabelsExpr) {} +func (*JSONSerializer) VisitLabelFilter(*LabelFilterExpr) {} +func (*JSONSerializer) VisitLabelFmt(*LabelFmtExpr) {} +func (*JSONSerializer) VisitLabelParser(*LineParserExpr) {} +func (*JSONSerializer) VisitLineFilter(*LineFilterExpr) {} +func (*JSONSerializer) VisitLineFmt(*LineFmtExpr) {} +func (*JSONSerializer) VisitLogfmtExpressionParser(*LogfmtExpressionParserExpr) {} +func (*JSONSerializer) VisitLogfmtParser(*LogfmtParserExpr) {} func encodeGrouping(s *jsoniter.Stream, g *Grouping) { s.WriteObjectStart() @@ -858,8 +858,8 @@ func decodeRangeAgg(iter *jsoniter.Iterator) (*RangeAggregationExpr, error) { return expr, err } -func decodeLogRange(iter *jsoniter.Iterator) (*LogRange, error) { - expr := &LogRange{} +func decodeLogRange(iter *jsoniter.Iterator) (*LogRangeExpr, error) { + expr := &LogRangeExpr{} var err error for f := iter.ReadObject(); f != ""; f = iter.ReadObject() { diff --git a/pkg/logql/syntax/expr.y b/pkg/logql/syntax/syntax.y similarity index 72% rename from pkg/logql/syntax/expr.y rename to pkg/logql/syntax/syntax.y index b32366d65e37e..6b1a3c143d0d6 100644 --- a/pkg/logql/syntax/expr.y +++ b/pkg/logql/syntax/syntax.y @@ -5,142 +5,82 @@ import ( "time" "github.com/prometheus/prometheus/model/labels" "github.com/grafana/loki/v3/pkg/logql/log" - ) %} %union{ - Expr Expr - Filter log.LineMatchType - Grouping *Grouping - Labels []string - LogExpr LogSelectorExpr - LogRangeExpr *LogRange - Matcher *labels.Matcher - Matchers []*labels.Matcher - RangeAggregationExpr SampleExpr - RangeOp string - ConvOp string - Selector []*labels.Matcher - VectorAggregationExpr SampleExpr - VectorExpr *VectorExpr - Vector string - MetricExpr SampleExpr - VectorOp string - FilterOp string - BinOpExpr SampleExpr - LabelReplaceExpr SampleExpr - binOp string - bytes uint64 - str string - 
duration time.Duration - LiteralExpr *LiteralExpr - BinOpModifier *BinOpOptions - BoolModifier *BinOpOptions - OnOrIgnoringModifier *BinOpOptions - LabelParser *LabelParserExpr - LogfmtParser *LogfmtParserExpr - LineFilters *LineFilterExpr - LineFilter *LineFilterExpr - OrFilter *LineFilterExpr - ParserFlags []string - PipelineExpr MultiStageExpr - PipelineStage StageExpr - BytesFilter log.LabelFilterer - NumberFilter log.LabelFilterer - DurationFilter log.LabelFilterer - LabelFilter log.LabelFilterer - UnitFilter log.LabelFilterer - IPLabelFilter log.LabelFilterer - LineFormatExpr *LineFmtExpr - LabelFormatExpr *LabelFmtExpr - LabelFormat log.LabelFmt - LabelsFormat []log.LabelFmt - - LabelExtractionExpression log.LabelExtractionExpr - LabelExtractionExpressionList []log.LabelExtractionExpr - JSONExpressionParser *JSONExpressionParser - LogfmtExpressionParser *LogfmtExpressionParser - - UnwrapExpr *UnwrapExpr - DecolorizeExpr *DecolorizeExpr - OffsetExpr *OffsetExpr - DropLabel log.DropLabel - DropLabels []log.DropLabel - DropLabelsExpr *DropLabelsExpr - KeepLabel log.KeepLabel - KeepLabels []log.KeepLabel - KeepLabelsExpr *KeepLabelsExpr + val interface{} + bytes uint64 + dur time.Duration + op string + binOp string + str string + strs []string + + expr Expr + logExpr LogSelectorExpr + metricExpr SampleExpr + + matcher *labels.Matcher + matchers []*labels.Matcher + stage StageExpr + stages MultiStageExpr + filterer log.LabelFilterer + filter log.LineMatchType + lineFilterExpr *LineFilterExpr + binOpts *BinOpOptions + namedMatcher log.NamedLabelMatcher + namedMatchers []log.NamedLabelMatcher + labelFormat log.LabelFmt + labelsFormat []log.LabelFmt + grouping *Grouping + logRangeExpr *LogRangeExpr + literalExpr *LiteralExpr + labelFormatExpr *LabelFmtExpr + labelExtractionExpression log.LabelExtractionExpr + labelExtractionExpressionList []log.LabelExtractionExpr + unwrapExpr *UnwrapExpr + offsetExpr *OffsetExpr } %start root -%type expr -%type filter -%type grouping -%type labels -%type logExpr -%type metricExpr -%type logRangeExpr -%type matcher -%type matchers -%type rangeAggregationExpr -%type rangeOp -%type convOp -%type selector -%type vectorAggregationExpr -%type vectorOp -%type vectorExpr -%type vector -%type filterOp -%type binOpExpr -%type literalExpr -%type labelReplaceExpr -%type binOpModifier -%type boolModifier -%type onOrIgnoringModifier -%type labelParser -%type logfmtParser -%type pipelineExpr -%type pipelineStage -%type bytesFilter -%type numberFilter -%type durationFilter -%type labelFilter -%type lineFilters -%type lineFilter -%type orFilter -%type parserFlags -%type lineFormatExpr -%type decolorizeExpr -%type dropLabelsExpr -%type dropLabels -%type dropLabel -%type keepLabelsExpr -%type keepLabels -%type keepLabel -%type labelFormatExpr -%type labelFormat -%type labelsFormat -%type labelExtractionExpression -%type labelExtractionExpressionList -%type logfmtExpressionParser -%type jsonExpressionParser -%type unwrapExpr -%type unitFilter -%type ipLabelFilter -%type offsetExpr +%type expr +%type logExpr +%type metricExpr rangeAggregationExpr vectorAggregationExpr binOpExpr labelReplaceExpr vectorExpr +%type pipelineStage logfmtParser labelParser jsonExpressionParser logfmtExpressionParser lineFormatExpr decolorizeExpr labelFormatExpr dropLabelsExpr keepLabelsExpr +%type pipelineExpr +%type lineFilter lineFilters orFilter +%type rangeOp convOp vectorOp filterOp +%type bytesFilter numberFilter durationFilter labelFilter unitFilter ipLabelFilter +%type filter +%type matcher 
+%type matchers selector +%type vector +%type labels parserFlags +%type binOpModifier boolModifier onOrIgnoringModifier +%type namedMatcher +%type namedMatchers +%type labelFormat +%type labelsFormat +%type grouping +%type logRangeExpr +%type literalExpr +%type labelExtractionExpression +%type labelExtractionExpressionList +%type unwrapExpr +%type offsetExpr %token BYTES -%token IDENTIFIER STRING NUMBER PARSER_FLAG -%token DURATION RANGE -%token MATCHERS LABELS EQ RE NRE NPA OPEN_BRACE CLOSE_BRACE OPEN_BRACKET CLOSE_BRACKET COMMA DOT PIPE_MATCH PIPE_EXACT PIPE_PATTERN - OPEN_PARENTHESIS CLOSE_PARENTHESIS BY WITHOUT COUNT_OVER_TIME RATE RATE_COUNTER SUM SORT SORT_DESC AVG - MAX MIN COUNT STDDEV STDVAR BOTTOMK TOPK APPROX_TOPK - BYTES_OVER_TIME BYTES_RATE BOOL JSON REGEXP LOGFMT PIPE LINE_FMT LABEL_FMT UNWRAP AVG_OVER_TIME SUM_OVER_TIME MIN_OVER_TIME - MAX_OVER_TIME STDVAR_OVER_TIME STDDEV_OVER_TIME QUANTILE_OVER_TIME BYTES_CONV DURATION_CONV DURATION_SECONDS_CONV - FIRST_OVER_TIME LAST_OVER_TIME ABSENT_OVER_TIME VECTOR LABEL_REPLACE UNPACK OFFSET PATTERN IP ON IGNORING GROUP_LEFT GROUP_RIGHT - DECOLORIZE DROP KEEP +%token IDENTIFIER STRING NUMBER FUNCTION_FLAG +%token DURATION RANGE +%token MATCHERS LABELS EQ RE NRE NPA OPEN_BRACE CLOSE_BRACE OPEN_BRACKET CLOSE_BRACKET COMMA DOT PIPE_MATCH PIPE_EXACT PIPE_PATTERN + OPEN_PARENTHESIS CLOSE_PARENTHESIS BY WITHOUT COUNT_OVER_TIME RATE RATE_COUNTER SUM SORT SORT_DESC AVG + MAX MIN COUNT STDDEV STDVAR BOTTOMK TOPK APPROX_TOPK + BYTES_OVER_TIME BYTES_RATE BOOL JSON REGEXP LOGFMT PIPE LINE_FMT LABEL_FMT UNWRAP AVG_OVER_TIME SUM_OVER_TIME MIN_OVER_TIME + MAX_OVER_TIME STDVAR_OVER_TIME STDDEV_OVER_TIME QUANTILE_OVER_TIME BYTES_CONV DURATION_CONV DURATION_SECONDS_CONV + FIRST_OVER_TIME LAST_OVER_TIME ABSENT_OVER_TIME VECTOR LABEL_REPLACE UNPACK OFFSET PATTERN IP ON IGNORING GROUP_LEFT GROUP_RIGHT + DECOLORIZE DROP KEEP // Operators are listed with increasing precedence. 
%left OR @@ -152,11 +92,18 @@ import ( %% -root: expr { exprlex.(*parser).expr = $1 }; +root: + expr { syntaxlex.(*parser).expr = $1 }; expr: - logExpr { $$ = $1 } - | metricExpr { $$ = $1 } + logExpr { $$ = $1 } + | metricExpr { $$ = $1 } + ; + +logExpr: + selector { $$ = newMatcherExpr($1)} + | selector pipelineExpr { $$ = newPipelineExpr(newMatcherExpr($1), $2)} + | OPEN_PARENTHESIS logExpr CLOSE_PARENTHESIS { $$ = $2 } ; metricExpr: @@ -169,12 +116,6 @@ metricExpr: | OPEN_PARENTHESIS metricExpr CLOSE_PARENTHESIS { $$ = $2 } ; -logExpr: - selector { $$ = newMatcherExpr($1)} - | selector pipelineExpr { $$ = newPipelineExpr(newMatcherExpr($1), $2)} - | OPEN_PARENTHESIS logExpr CLOSE_PARENTHESIS { $$ = $2 } - ; - logRangeExpr: selector RANGE { $$ = newLogRange(newMatcherExpr($1), $2, nil, nil ) } | selector RANGE offsetExpr { $$ = newLogRange(newMatcherExpr($1), $2, nil, $3 ) } @@ -239,15 +180,6 @@ labelReplaceExpr: { $$ = mustNewLabelReplaceExpr($3, $5, $7, $9, $11)} ; -filter: - PIPE_MATCH { $$ = log.LineMatchRegexp } - | PIPE_EXACT { $$ = log.LineMatchEqual } - | PIPE_PATTERN { $$ = log.LineMatchPattern } - | NRE { $$ = log.LineMatchNotRegexp } - | NEQ { $$ = log.LineMatchNotEqual } - | NPA { $$ = log.LineMatchNotPattern } - ; - selector: OPEN_BRACE matchers CLOSE_BRACE { $$ = $2 } | OPEN_BRACE matchers error { $$ = $2 } @@ -285,31 +217,39 @@ pipelineStage: | PIPE keepLabelsExpr { $$ = $2 } ; +filter: + PIPE_MATCH { $$ = log.LineMatchRegexp } + | PIPE_EXACT { $$ = log.LineMatchEqual } + | PIPE_PATTERN { $$ = log.LineMatchPattern } + | NRE { $$ = log.LineMatchNotRegexp } + | NEQ { $$ = log.LineMatchNotEqual } + | NPA { $$ = log.LineMatchNotPattern } + ; + filterOp: IP { $$ = OpFilterIP } ; orFilter: - STRING { $$ = newLineFilterExpr(log.LineMatchEqual, "", $1) } - | filterOp OPEN_PARENTHESIS STRING CLOSE_PARENTHESIS { $$ = newLineFilterExpr(log.LineMatchEqual, $1, $3) } - | STRING OR orFilter { $$ = newOrLineFilter(newLineFilterExpr(log.LineMatchEqual, "", $1), $3) } + STRING { $$ = newLineFilterExpr(log.LineMatchEqual, "", $1) } + | STRING OR orFilter { $$ = newOrLineFilterExpr(newLineFilterExpr(log.LineMatchEqual, "", $1), $3) } + | filterOp OPEN_PARENTHESIS STRING CLOSE_PARENTHESIS { $$ = newLineFilterExpr(log.LineMatchEqual, $1, $3) } ; lineFilter: - filter STRING { $$ = newLineFilterExpr($1, "", $2) } - | filter filterOp OPEN_PARENTHESIS STRING CLOSE_PARENTHESIS { $$ = newLineFilterExpr($1, $2, $4) } - | filter STRING OR orFilter { $$ = newOrLineFilter(newLineFilterExpr($1, "", $2), $4) } + filter STRING { $$ = newLineFilterExpr($1, "", $2) } + | filter filterOp OPEN_PARENTHESIS STRING CLOSE_PARENTHESIS { $$ = newLineFilterExpr($1, $2, $4) } + | lineFilter OR orFilter { $$ = newOrLineFilterExpr($1, $3) } ; lineFilters: - lineFilter { $$ = $1 } - | lineFilter OR orFilter { $$ = newOrLineFilter($1, $3)} - | lineFilters lineFilter { $$ = newNestedLineFilterExpr($1, $2) } + lineFilter { $$ = $1 } + | lineFilters lineFilter { $$ = newNestedLineFilterExpr($1, $2) } ; parserFlags: - PARSER_FLAG { $$ = []string{ $1 } } - | parserFlags PARSER_FLAG { $$ = append($1, $2) } + FUNCTION_FLAG { $$ = []string{ $1 } } + | parserFlags FUNCTION_FLAG { $$ = append($1, $2) } ; logfmtParser: @@ -410,27 +350,18 @@ numberFilter: | IDENTIFIER CMP_EQ literalExpr { $$ = log.NewNumericLabelFilter(log.LabelFilterEqual, $1, $3.Val)} ; -dropLabel: - IDENTIFIER { $$ = log.NewDropLabel(nil, $1) } - | matcher { $$ = log.NewDropLabel($1, "") } +namedMatcher: + IDENTIFIER { $$ = log.NewNamedLabelMatcher(nil, $1) } + | 
matcher { $$ = log.NewNamedLabelMatcher($1, "") } -dropLabels: - dropLabel { $$ = []log.DropLabel{$1}} - | dropLabels COMMA dropLabel { $$ = append($1, $3) } +namedMatchers: + namedMatcher { $$ = []log.NamedLabelMatcher{$1} } + | namedMatchers COMMA namedMatcher { $$ = append($1, $3) } ; -dropLabelsExpr: DROP dropLabels { $$ = newDropLabelsExpr($2) } - -keepLabel: - IDENTIFIER { $$ = log.NewKeepLabel(nil, $1) } - | matcher { $$ = log.NewKeepLabel($1, "") } - -keepLabels: - keepLabel { $$ = []log.KeepLabel{$1}} - | keepLabels COMMA keepLabel { $$ = append($1, $3) } - ; +dropLabelsExpr: DROP namedMatchers { $$ = newDropLabelsExpr($2) } -keepLabelsExpr: KEEP keepLabels { $$ = newKeepLabelsExpr($2) } +keepLabelsExpr: KEEP namedMatchers { $$ = newKeepLabelsExpr($2) } // Operator precedence only works if each of these is listed separately. binOpExpr: diff --git a/pkg/logql/syntax/syntax.y.go b/pkg/logql/syntax/syntax.y.go new file mode 100644 index 0000000000000..7d0a059b885a6 --- /dev/null +++ b/pkg/logql/syntax/syntax.y.go @@ -0,0 +1,1989 @@ +// Code generated by goyacc -l -p syntax -o pkg/logql/syntax/syntax.y.go pkg/logql/syntax/syntax.y. DO NOT EDIT. +package syntax + +import __yyfmt__ "fmt" + +import ( + "github.com/grafana/loki/v3/pkg/logql/log" + "github.com/prometheus/prometheus/model/labels" + "time" +) + +type syntaxSymType struct { + yys int + val interface{} + bytes uint64 + dur time.Duration + op string + binOp string + str string + strs []string + + expr Expr + logExpr LogSelectorExpr + metricExpr SampleExpr + + matcher *labels.Matcher + matchers []*labels.Matcher + stage StageExpr + stages MultiStageExpr + filterer log.LabelFilterer + filter log.LineMatchType + lineFilterExpr *LineFilterExpr + binOpts *BinOpOptions + namedMatcher log.NamedLabelMatcher + namedMatchers []log.NamedLabelMatcher + labelFormat log.LabelFmt + labelsFormat []log.LabelFmt + grouping *Grouping + logRangeExpr *LogRangeExpr + literalExpr *LiteralExpr + labelFormatExpr *LabelFmtExpr + labelExtractionExpression log.LabelExtractionExpr + labelExtractionExpressionList []log.LabelExtractionExpr + unwrapExpr *UnwrapExpr + offsetExpr *OffsetExpr +} + +const BYTES = 57346 +const IDENTIFIER = 57347 +const STRING = 57348 +const NUMBER = 57349 +const FUNCTION_FLAG = 57350 +const DURATION = 57351 +const RANGE = 57352 +const MATCHERS = 57353 +const LABELS = 57354 +const EQ = 57355 +const RE = 57356 +const NRE = 57357 +const NPA = 57358 +const OPEN_BRACE = 57359 +const CLOSE_BRACE = 57360 +const OPEN_BRACKET = 57361 +const CLOSE_BRACKET = 57362 +const COMMA = 57363 +const DOT = 57364 +const PIPE_MATCH = 57365 +const PIPE_EXACT = 57366 +const PIPE_PATTERN = 57367 +const OPEN_PARENTHESIS = 57368 +const CLOSE_PARENTHESIS = 57369 +const BY = 57370 +const WITHOUT = 57371 +const COUNT_OVER_TIME = 57372 +const RATE = 57373 +const RATE_COUNTER = 57374 +const SUM = 57375 +const SORT = 57376 +const SORT_DESC = 57377 +const AVG = 57378 +const MAX = 57379 +const MIN = 57380 +const COUNT = 57381 +const STDDEV = 57382 +const STDVAR = 57383 +const BOTTOMK = 57384 +const TOPK = 57385 +const APPROX_TOPK = 57386 +const BYTES_OVER_TIME = 57387 +const BYTES_RATE = 57388 +const BOOL = 57389 +const JSON = 57390 +const REGEXP = 57391 +const LOGFMT = 57392 +const PIPE = 57393 +const LINE_FMT = 57394 +const LABEL_FMT = 57395 +const UNWRAP = 57396 +const AVG_OVER_TIME = 57397 +const SUM_OVER_TIME = 57398 +const MIN_OVER_TIME = 57399 +const MAX_OVER_TIME = 57400 +const STDVAR_OVER_TIME = 57401 +const STDDEV_OVER_TIME = 57402 +const 
QUANTILE_OVER_TIME = 57403 +const BYTES_CONV = 57404 +const DURATION_CONV = 57405 +const DURATION_SECONDS_CONV = 57406 +const FIRST_OVER_TIME = 57407 +const LAST_OVER_TIME = 57408 +const ABSENT_OVER_TIME = 57409 +const VECTOR = 57410 +const LABEL_REPLACE = 57411 +const UNPACK = 57412 +const OFFSET = 57413 +const PATTERN = 57414 +const IP = 57415 +const ON = 57416 +const IGNORING = 57417 +const GROUP_LEFT = 57418 +const GROUP_RIGHT = 57419 +const DECOLORIZE = 57420 +const DROP = 57421 +const KEEP = 57422 +const OR = 57423 +const AND = 57424 +const UNLESS = 57425 +const CMP_EQ = 57426 +const NEQ = 57427 +const LT = 57428 +const LTE = 57429 +const GT = 57430 +const GTE = 57431 +const ADD = 57432 +const SUB = 57433 +const MUL = 57434 +const DIV = 57435 +const MOD = 57436 +const POW = 57437 + +var syntaxToknames = [...]string{ + "$end", + "error", + "$unk", + "BYTES", + "IDENTIFIER", + "STRING", + "NUMBER", + "FUNCTION_FLAG", + "DURATION", + "RANGE", + "MATCHERS", + "LABELS", + "EQ", + "RE", + "NRE", + "NPA", + "OPEN_BRACE", + "CLOSE_BRACE", + "OPEN_BRACKET", + "CLOSE_BRACKET", + "COMMA", + "DOT", + "PIPE_MATCH", + "PIPE_EXACT", + "PIPE_PATTERN", + "OPEN_PARENTHESIS", + "CLOSE_PARENTHESIS", + "BY", + "WITHOUT", + "COUNT_OVER_TIME", + "RATE", + "RATE_COUNTER", + "SUM", + "SORT", + "SORT_DESC", + "AVG", + "MAX", + "MIN", + "COUNT", + "STDDEV", + "STDVAR", + "BOTTOMK", + "TOPK", + "APPROX_TOPK", + "BYTES_OVER_TIME", + "BYTES_RATE", + "BOOL", + "JSON", + "REGEXP", + "LOGFMT", + "PIPE", + "LINE_FMT", + "LABEL_FMT", + "UNWRAP", + "AVG_OVER_TIME", + "SUM_OVER_TIME", + "MIN_OVER_TIME", + "MAX_OVER_TIME", + "STDVAR_OVER_TIME", + "STDDEV_OVER_TIME", + "QUANTILE_OVER_TIME", + "BYTES_CONV", + "DURATION_CONV", + "DURATION_SECONDS_CONV", + "FIRST_OVER_TIME", + "LAST_OVER_TIME", + "ABSENT_OVER_TIME", + "VECTOR", + "LABEL_REPLACE", + "UNPACK", + "OFFSET", + "PATTERN", + "IP", + "ON", + "IGNORING", + "GROUP_LEFT", + "GROUP_RIGHT", + "DECOLORIZE", + "DROP", + "KEEP", + "OR", + "AND", + "UNLESS", + "CMP_EQ", + "NEQ", + "LT", + "LTE", + "GT", + "GTE", + "ADD", + "SUB", + "MUL", + "DIV", + "MOD", + "POW", +} +var syntaxStatenames = [...]string{} + +const syntaxEofCode = 1 +const syntaxErrCode = 2 +const syntaxInitialStackSize = 16 + +var syntaxExca = [...]int{ + -1, 1, + 1, -1, + -2, 0, +} + +const syntaxPrivate = 57344 + +const syntaxLast = 656 + +var syntaxAct = [...]int{ + + 284, 225, 85, 4, 212, 65, 183, 127, 201, 190, + 77, 198, 234, 64, 200, 5, 153, 188, 78, 2, + 57, 81, 49, 50, 51, 58, 59, 62, 63, 60, + 61, 52, 53, 54, 55, 56, 57, 10, 50, 51, + 58, 59, 62, 63, 60, 61, 52, 53, 54, 55, + 56, 57, 58, 59, 62, 63, 60, 61, 52, 53, + 54, 55, 56, 57, 52, 53, 54, 55, 56, 57, + 110, 279, 140, 287, 116, 54, 55, 56, 57, 262, + 213, 218, 16, 292, 261, 149, 151, 152, 157, 289, + 258, 141, 217, 16, 162, 257, 167, 168, 214, 155, + 277, 165, 166, 16, 68, 276, 360, 205, 151, 152, + 360, 164, 95, 357, 221, 169, 170, 171, 172, 173, + 174, 175, 176, 177, 178, 179, 180, 181, 182, 274, + 287, 380, 16, 137, 273, 86, 87, 271, 195, 325, + 16, 192, 270, 203, 203, 334, 137, 143, 260, 185, + 268, 204, 321, 16, 131, 267, 216, 150, 143, 256, + 333, 375, 185, 232, 368, 17, 18, 131, 142, 226, + 288, 111, 228, 229, 237, 236, 17, 18, 211, 206, + 209, 210, 207, 208, 367, 265, 17, 18, 16, 301, + 264, 245, 246, 247, 301, 350, 137, 311, 73, 75, + 349, 289, 336, 337, 338, 249, 70, 71, 72, 186, + 184, 289, 185, 365, 353, 17, 18, 131, 84, 320, + 86, 87, 343, 17, 18, 282, 285, 324, 291, 236, + 294, 299, 110, 297, 116, 298, 17, 18, 286, 155, + 283, 
333, 295, 259, 263, 266, 269, 272, 275, 278, + 240, 309, 288, 301, 305, 307, 310, 312, 363, 348, + 313, 290, 221, 203, 319, 315, 73, 75, 74, 340, + 230, 17, 18, 184, 70, 71, 72, 301, 341, 301, + 145, 236, 289, 347, 322, 303, 326, 296, 328, 330, + 236, 332, 110, 289, 224, 236, 331, 342, 327, 73, + 75, 110, 227, 308, 344, 236, 221, 70, 71, 72, + 301, 293, 306, 154, 144, 13, 302, 238, 137, 280, + 244, 137, 243, 13, 156, 242, 378, 235, 354, 355, + 374, 222, 156, 110, 356, 227, 74, 185, 241, 131, + 358, 359, 131, 252, 215, 161, 364, 160, 159, 91, + 90, 83, 346, 290, 16, 250, 147, 300, 73, 75, + 370, 255, 371, 372, 13, 253, 70, 71, 72, 74, + 239, 231, 146, 6, 376, 148, 223, 21, 22, 23, + 36, 45, 46, 37, 39, 40, 38, 41, 42, 43, + 44, 47, 24, 25, 227, 254, 82, 186, 184, 251, + 373, 329, 26, 27, 28, 29, 30, 31, 32, 80, + 362, 361, 33, 34, 35, 48, 19, 233, 224, 339, + 317, 318, 191, 73, 75, 248, 191, 13, 74, 189, + 163, 70, 71, 72, 89, 88, 6, 17, 18, 379, + 21, 22, 23, 36, 45, 46, 37, 39, 40, 38, + 41, 42, 43, 44, 47, 24, 25, 3, 377, 227, + 366, 352, 351, 323, 76, 26, 27, 28, 29, 30, + 31, 32, 314, 304, 281, 33, 34, 35, 48, 19, + 158, 316, 220, 219, 199, 369, 73, 75, 218, 217, + 13, 196, 194, 74, 70, 71, 72, 193, 345, 6, + 17, 18, 137, 21, 22, 23, 36, 45, 46, 37, + 39, 40, 38, 41, 42, 43, 44, 47, 24, 25, + 202, 191, 227, 131, 82, 199, 197, 94, 26, 27, + 28, 29, 30, 31, 32, 93, 137, 187, 33, 34, + 35, 48, 19, 20, 79, 123, 124, 122, 69, 132, + 134, 292, 73, 75, 128, 129, 74, 131, 138, 130, + 70, 71, 72, 17, 18, 139, 15, 125, 335, 126, + 14, 66, 92, 121, 120, 133, 135, 136, 119, 123, + 124, 122, 118, 132, 134, 73, 75, 117, 227, 115, + 114, 113, 112, 70, 71, 72, 12, 11, 9, 8, + 7, 125, 1, 126, 0, 0, 0, 0, 287, 133, + 135, 136, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 67, 74, 96, 97, 98, 99, 100, 101, 102, + 103, 104, 105, 106, 107, 108, 109, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 74, +} +var syntaxPact = [...]int{ + + 347, -1000, -59, -1000, -1000, 570, 347, -1000, -1000, -1000, + -1000, -1000, -1000, 391, 325, 192, -1000, 428, 427, 324, + 323, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 65, + 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, + 65, 65, 65, 65, 570, -1000, 183, 531, -9, 85, + -1000, -1000, -1000, -1000, -1000, -1000, 287, 253, -59, 354, + -1000, -1000, 72, 306, 473, 322, 321, 319, -1000, -1000, + 347, 423, 347, 27, 20, -1000, 347, 347, 347, 347, + 347, 347, 347, 347, 347, 347, 347, 347, 347, 347, + -1000, -9, -1000, -1000, -1000, -1000, 128, -1000, -1000, -1000, + -1000, -1000, 421, 516, 491, -1000, 486, -1000, -1000, -1000, + -1000, 313, 485, -1000, 520, 515, 515, 94, -1000, -1000, + 74, -1000, 318, -1000, -1000, -1000, -1000, -1000, 519, 483, + 482, 477, 476, 304, 355, 408, 298, 243, 350, 410, + 300, 290, 349, 223, -44, 312, 299, 296, 294, -32, + -32, -17, -17, -75, -75, -75, -75, -26, -26, -26, + -26, -26, -26, 128, 313, 313, 313, 417, 334, -1000, + -1000, 386, 334, -1000, -1000, 316, -1000, 344, -1000, 382, + 340, -1000, 72, -1000, 340, 86, 75, 181, 146, 133, + 125, 96, -1000, -10, 293, 468, -1000, -1000, -1000, -1000, + -1000, -1000, 107, 298, 537, 160, 343, 497, 284, 260, + 107, 347, 204, 336, 289, -1000, -1000, 258, -1000, 467, + -1000, 285, 276, 224, 170, 141, 128, 191, -1000, 334, + 516, 466, -1000, 479, 415, 515, 193, -1000, -1000, -1000, + 126, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, 
-1000, -1000, -1000, -1000, -1000, -1000, -1000, 74, + 457, 200, -1000, 112, 471, 38, 471, 392, 2, 313, + 2, 150, 140, 409, 242, 251, -1000, -1000, 195, -1000, + 347, 493, -1000, -1000, 331, 256, -1000, 232, -1000, -1000, + 173, -1000, 168, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + 456, 455, -1000, 187, -1000, 107, 38, 471, 38, -1000, + -1000, 128, -1000, 2, -1000, 87, -1000, -1000, -1000, 59, + 401, 400, 231, 107, 186, -1000, 454, -1000, -1000, -1000, + -1000, 157, 137, -1000, -1000, 38, -1000, 480, 55, 38, + 29, 2, 2, 390, -1000, -1000, 309, -1000, -1000, 134, + 38, -1000, -1000, 2, 452, -1000, -1000, 305, 433, 104, + -1000, +} +var syntaxPgo = [...]int{ + + 0, 602, 18, 457, 3, 600, 599, 598, 597, 596, + 5, 592, 591, 590, 589, 587, 582, 578, 574, 573, + 13, 104, 571, 4, 570, 568, 566, 98, 565, 559, + 558, 6, 555, 554, 548, 7, 544, 15, 543, 12, + 537, 572, 535, 527, 8, 14, 11, 526, 2, 16, + 37, 9, 17, 1, 0, +} +var syntaxR1 = [...]int{ + + 0, 1, 2, 2, 3, 3, 3, 4, 4, 4, + 4, 4, 4, 4, 49, 49, 49, 49, 49, 49, + 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, + 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, + 53, 53, 53, 25, 25, 25, 5, 5, 5, 5, + 6, 6, 6, 6, 6, 6, 8, 37, 37, 37, + 36, 36, 35, 35, 35, 35, 20, 20, 10, 10, + 10, 10, 10, 10, 10, 10, 10, 10, 10, 34, + 34, 34, 34, 34, 34, 27, 23, 23, 23, 21, + 21, 21, 22, 22, 40, 40, 11, 11, 12, 12, + 12, 12, 13, 14, 14, 15, 16, 46, 46, 47, + 47, 47, 17, 31, 31, 31, 31, 31, 31, 31, + 31, 31, 51, 51, 52, 52, 33, 33, 32, 32, + 30, 30, 30, 30, 30, 30, 30, 28, 28, 28, + 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, + 29, 44, 44, 45, 45, 18, 19, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 42, 42, 43, 43, 43, 43, 41, 41, + 41, 41, 41, 41, 41, 41, 50, 50, 50, 9, + 38, 26, 26, 26, 26, 26, 26, 26, 26, 26, + 26, 26, 26, 24, 24, 24, 24, 24, 24, 24, + 24, 24, 24, 24, 24, 24, 24, 24, 54, 39, + 39, 48, 48, 48, 48, +} +var syntaxR2 = [...]int{ + + 0, 1, 1, 1, 1, 2, 3, 1, 1, 1, + 1, 1, 1, 3, 2, 3, 4, 5, 3, 4, + 5, 6, 3, 4, 5, 6, 3, 4, 5, 6, + 4, 5, 6, 7, 3, 4, 4, 5, 3, 2, + 3, 6, 3, 1, 1, 1, 4, 6, 5, 7, + 4, 5, 5, 6, 7, 7, 12, 3, 3, 2, + 1, 3, 3, 3, 3, 3, 1, 2, 1, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, + 1, 1, 1, 1, 1, 1, 1, 3, 4, 2, + 5, 3, 1, 2, 1, 2, 1, 2, 1, 2, + 1, 2, 2, 3, 2, 2, 1, 3, 3, 1, + 3, 3, 2, 1, 1, 1, 1, 3, 2, 3, + 3, 3, 3, 1, 1, 3, 6, 6, 1, 1, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 1, 1, 1, 3, 2, 2, 4, 4, 4, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + 4, 4, 0, 1, 5, 4, 5, 4, 1, 1, + 2, 4, 5, 2, 4, 5, 1, 2, 2, 4, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, + 3, 4, 4, 3, 3, +} +var syntaxChk = [...]int{ + + -1000, -1, -2, -3, -4, -37, 26, -5, -6, -7, + -50, -8, -9, 17, -24, -26, 7, 90, 91, 69, + -38, 30, 31, 32, 45, 46, 55, 56, 57, 58, + 59, 60, 61, 65, 66, 67, 33, 36, 39, 37, + 38, 40, 41, 42, 43, 34, 35, 44, 68, 81, + 82, 83, 90, 91, 92, 93, 94, 95, 84, 85, + 88, 89, 86, 87, -20, -10, -22, 51, -21, -34, + 23, 24, 25, 15, 85, 16, -3, -4, -2, -36, + 18, -35, 5, 26, 26, -48, 28, 29, 7, 7, + 26, 26, -41, -42, -43, 47, -41, -41, -41, -41, + -41, -41, -41, -41, -41, -41, -41, -41, -41, -41, + -10, -21, -11, -12, -13, -14, -31, -15, -16, -17, + -18, -19, 50, 48, 49, 70, 72, -35, -33, -32, + -29, 26, 52, 78, 53, 79, 80, 5, -30, -28, + 81, 6, -27, 73, 27, 27, 18, 2, 21, 13, + 85, 14, 15, -49, 7, -37, 26, -4, 7, 26, + 26, 26, -4, 7, -2, 74, 75, 76, 77, -2, + -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, + -2, -2, -2, -31, 82, 21, 81, -40, -52, 8, + -51, 5, -52, 6, 6, -31, 6, -47, -46, 5, + -45, -44, 5, 
-35, -45, 13, 85, 88, 89, 86, + 87, 84, -23, 6, -27, 26, -35, 6, 6, 6, + 6, 2, 27, 21, 10, -53, -20, 51, -37, -49, + 27, 21, -4, 7, -39, 27, 5, -39, 27, 21, + 27, 26, 26, 26, 26, -31, -31, -31, 8, -52, + 21, 13, 27, 21, 13, 21, 73, 9, 4, -50, + 73, 9, 4, -50, 9, 4, -50, 9, 4, -50, + 9, 4, -50, 9, 4, -50, 9, 4, -50, 81, + 26, 6, -48, -49, -54, -53, -20, 71, 10, 51, + 10, -53, 54, 27, -53, -20, 27, -48, -4, 27, + 21, 21, 27, 27, 6, -39, 27, -39, 27, 27, + -39, 27, -39, -51, 6, -46, 2, 5, 6, -44, + 26, 26, -23, 6, 27, 27, -53, -20, -53, 9, + -54, -31, -54, 10, 5, -25, 62, 63, 64, 10, + 27, 27, -53, 27, -4, 5, 21, 27, 27, 27, + 27, 6, 6, 27, -48, -53, -54, 26, -54, -53, + 51, 10, 10, 27, -48, 27, 6, 27, 27, 5, + -53, -54, -54, 10, 21, 27, -54, 6, 21, 6, + 27, +} +var syntaxDef = [...]int{ + + 0, -2, 1, 2, 3, 4, 0, 7, 8, 9, + 10, 11, 12, 0, 0, 0, 186, 0, 0, 0, + 0, 203, 204, 205, 206, 207, 208, 209, 210, 211, + 212, 213, 214, 215, 216, 217, 191, 192, 193, 194, + 195, 196, 197, 198, 199, 200, 201, 202, 190, 172, + 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, + 172, 172, 172, 172, 5, 66, 68, 0, 92, 0, + 79, 80, 81, 82, 83, 84, 2, 3, 0, 0, + 59, 60, 0, 0, 0, 0, 0, 0, 187, 188, + 0, 0, 0, 178, 179, 173, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 67, 93, 69, 70, 71, 72, 73, 74, 75, 76, + 77, 78, 96, 98, 0, 100, 0, 113, 114, 115, + 116, 0, 0, 106, 0, 0, 0, 0, 128, 129, + 0, 89, 0, 85, 6, 13, 57, 58, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 3, 186, 0, + 0, 0, 3, 0, 157, 0, 0, 180, 183, 158, + 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 118, 0, 0, 0, 97, 104, 94, + 124, 123, 102, 99, 101, 0, 105, 112, 109, 0, + 155, 153, 151, 152, 156, 0, 0, 0, 0, 0, + 0, 0, 91, 86, 0, 0, 61, 62, 63, 64, + 65, 39, 46, 0, 14, 0, 0, 0, 0, 0, + 50, 0, 3, 186, 0, 223, 219, 0, 224, 0, + 189, 0, 0, 0, 0, 119, 120, 121, 95, 103, + 0, 0, 117, 0, 0, 0, 0, 135, 142, 149, + 0, 134, 141, 148, 130, 137, 144, 131, 138, 145, + 132, 139, 146, 133, 140, 147, 136, 143, 150, 0, + 0, 0, 48, 0, 15, 18, 34, 0, 22, 0, + 26, 0, 0, 0, 0, 0, 38, 52, 3, 51, + 0, 0, 221, 222, 0, 0, 175, 0, 177, 181, + 0, 184, 0, 125, 122, 110, 111, 107, 108, 154, + 0, 0, 87, 0, 90, 47, 19, 35, 36, 218, + 23, 42, 27, 30, 40, 0, 43, 44, 45, 16, + 0, 0, 0, 53, 3, 220, 0, 174, 176, 182, + 185, 0, 0, 88, 49, 37, 31, 0, 17, 20, + 0, 24, 28, 0, 54, 55, 0, 126, 127, 0, + 21, 25, 29, 32, 0, 41, 33, 0, 0, 0, + 56, +} +var syntaxTok1 = [...]int{ + + 1, +} +var syntaxTok2 = [...]int{ + + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, + 92, 93, 94, 95, +} +var syntaxTok3 = [...]int{ + 0, +} + +var syntaxErrorMessages = [...]struct { + state int + token int + msg string +}{} + +/* parser for yacc output */ + +var ( + syntaxDebug = 0 + syntaxErrorVerbose = false +) + +type syntaxLexer interface { + Lex(lval *syntaxSymType) int + Error(s string) +} + +type syntaxParser interface { + Parse(syntaxLexer) int + Lookahead() int +} + +type syntaxParserImpl struct { + lval syntaxSymType + stack [syntaxInitialStackSize]syntaxSymType + char int +} + +func (p *syntaxParserImpl) Lookahead() int { + return p.char +} + +func syntaxNewParser() syntaxParser { + return &syntaxParserImpl{} +} + +const syntaxFlag = -1000 + +func syntaxTokname(c int) string { + if 
c >= 1 && c-1 < len(syntaxToknames) { + if syntaxToknames[c-1] != "" { + return syntaxToknames[c-1] + } + } + return __yyfmt__.Sprintf("tok-%v", c) +} + +func syntaxStatname(s int) string { + if s >= 0 && s < len(syntaxStatenames) { + if syntaxStatenames[s] != "" { + return syntaxStatenames[s] + } + } + return __yyfmt__.Sprintf("state-%v", s) +} + +func syntaxErrorMessage(state, lookAhead int) string { + const TOKSTART = 4 + + if !syntaxErrorVerbose { + return "syntax error" + } + + for _, e := range syntaxErrorMessages { + if e.state == state && e.token == lookAhead { + return "syntax error: " + e.msg + } + } + + res := "syntax error: unexpected " + syntaxTokname(lookAhead) + + // To match Bison, suggest at most four expected tokens. + expected := make([]int, 0, 4) + + // Look for shiftable tokens. + base := syntaxPact[state] + for tok := TOKSTART; tok-1 < len(syntaxToknames); tok++ { + if n := base + tok; n >= 0 && n < syntaxLast && syntaxChk[syntaxAct[n]] == tok { + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + } + + if syntaxDef[state] == -2 { + i := 0 + for syntaxExca[i] != -1 || syntaxExca[i+1] != state { + i += 2 + } + + // Look for tokens that we accept or reduce. + for i += 2; syntaxExca[i] >= 0; i += 2 { + tok := syntaxExca[i] + if tok < TOKSTART || syntaxExca[i+1] == 0 { + continue + } + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + + // If the default action is to accept or reduce, give up. + if syntaxExca[i+1] != 0 { + return res + } + } + + for i, tok := range expected { + if i == 0 { + res += ", expecting " + } else { + res += " or " + } + res += syntaxTokname(tok) + } + return res +} + +func syntaxlex1(lex syntaxLexer, lval *syntaxSymType) (char, token int) { + token = 0 + char = lex.Lex(lval) + if char <= 0 { + token = syntaxTok1[0] + goto out + } + if char < len(syntaxTok1) { + token = syntaxTok1[char] + goto out + } + if char >= syntaxPrivate { + if char < syntaxPrivate+len(syntaxTok2) { + token = syntaxTok2[char-syntaxPrivate] + goto out + } + } + for i := 0; i < len(syntaxTok3); i += 2 { + token = syntaxTok3[i+0] + if token == char { + token = syntaxTok3[i+1] + goto out + } + } + +out: + if token == 0 { + token = syntaxTok2[1] /* unknown char */ + } + if syntaxDebug >= 3 { + __yyfmt__.Printf("lex %s(%d)\n", syntaxTokname(token), uint(char)) + } + return char, token +} + +func syntaxParse(syntaxlex syntaxLexer) int { + return syntaxNewParser().Parse(syntaxlex) +} + +func (syntaxrcvr *syntaxParserImpl) Parse(syntaxlex syntaxLexer) int { + var syntaxn int + var syntaxVAL syntaxSymType + var syntaxDollar []syntaxSymType + _ = syntaxDollar // silence set and not used + syntaxS := syntaxrcvr.stack[:] + + Nerrs := 0 /* number of errors */ + Errflag := 0 /* error recovery flag */ + syntaxstate := 0 + syntaxrcvr.char = -1 + syntaxtoken := -1 // syntaxrcvr.char translated into internal numbering + defer func() { + // Make sure we report no lookahead when not parsing. 
+ syntaxstate = -1 + syntaxrcvr.char = -1 + syntaxtoken = -1 + }() + syntaxp := -1 + goto syntaxstack + +ret0: + return 0 + +ret1: + return 1 + +syntaxstack: + /* put a state and value onto the stack */ + if syntaxDebug >= 4 { + __yyfmt__.Printf("char %v in %v\n", syntaxTokname(syntaxtoken), syntaxStatname(syntaxstate)) + } + + syntaxp++ + if syntaxp >= len(syntaxS) { + nyys := make([]syntaxSymType, len(syntaxS)*2) + copy(nyys, syntaxS) + syntaxS = nyys + } + syntaxS[syntaxp] = syntaxVAL + syntaxS[syntaxp].yys = syntaxstate + +syntaxnewstate: + syntaxn = syntaxPact[syntaxstate] + if syntaxn <= syntaxFlag { + goto syntaxdefault /* simple state */ + } + if syntaxrcvr.char < 0 { + syntaxrcvr.char, syntaxtoken = syntaxlex1(syntaxlex, &syntaxrcvr.lval) + } + syntaxn += syntaxtoken + if syntaxn < 0 || syntaxn >= syntaxLast { + goto syntaxdefault + } + syntaxn = syntaxAct[syntaxn] + if syntaxChk[syntaxn] == syntaxtoken { /* valid shift */ + syntaxrcvr.char = -1 + syntaxtoken = -1 + syntaxVAL = syntaxrcvr.lval + syntaxstate = syntaxn + if Errflag > 0 { + Errflag-- + } + goto syntaxstack + } + +syntaxdefault: + /* default state action */ + syntaxn = syntaxDef[syntaxstate] + if syntaxn == -2 { + if syntaxrcvr.char < 0 { + syntaxrcvr.char, syntaxtoken = syntaxlex1(syntaxlex, &syntaxrcvr.lval) + } + + /* look through exception table */ + xi := 0 + for { + if syntaxExca[xi+0] == -1 && syntaxExca[xi+1] == syntaxstate { + break + } + xi += 2 + } + for xi += 2; ; xi += 2 { + syntaxn = syntaxExca[xi+0] + if syntaxn < 0 || syntaxn == syntaxtoken { + break + } + } + syntaxn = syntaxExca[xi+1] + if syntaxn < 0 { + goto ret0 + } + } + if syntaxn == 0 { + /* error ... attempt to resume parsing */ + switch Errflag { + case 0: /* brand new error */ + syntaxlex.Error(syntaxErrorMessage(syntaxstate, syntaxtoken)) + Nerrs++ + if syntaxDebug >= 1 { + __yyfmt__.Printf("%s", syntaxStatname(syntaxstate)) + __yyfmt__.Printf(" saw %s\n", syntaxTokname(syntaxtoken)) + } + fallthrough + + case 1, 2: /* incompletely recovered error ... try again */ + Errflag = 3 + + /* find a state where "error" is a legal shift action */ + for syntaxp >= 0 { + syntaxn = syntaxPact[syntaxS[syntaxp].yys] + syntaxErrCode + if syntaxn >= 0 && syntaxn < syntaxLast { + syntaxstate = syntaxAct[syntaxn] /* simulate a shift of "error" */ + if syntaxChk[syntaxstate] == syntaxErrCode { + goto syntaxstack + } + } + + /* the current p has no shift on "error", pop stack */ + if syntaxDebug >= 2 { + __yyfmt__.Printf("error recovery pops state %d\n", syntaxS[syntaxp].yys) + } + syntaxp-- + } + /* there is no state on the stack with an error shift ... abort */ + goto ret1 + + case 3: /* no shift yet; clobber input char */ + if syntaxDebug >= 2 { + __yyfmt__.Printf("error recovery discards %s\n", syntaxTokname(syntaxtoken)) + } + if syntaxtoken == syntaxEofCode { + goto ret1 + } + syntaxrcvr.char = -1 + syntaxtoken = -1 + goto syntaxnewstate /* try again in the same state */ + } + } + + /* reduction by production syntaxn */ + if syntaxDebug >= 2 { + __yyfmt__.Printf("reduce %v in:\n\t%v\n", syntaxn, syntaxStatname(syntaxstate)) + } + + syntaxnt := syntaxn + syntaxpt := syntaxp + _ = syntaxpt // guard against "declared and not used" + + syntaxp -= syntaxR2[syntaxn] + // syntaxp is now the index of $0. Perform the default action. Iff the + // reduced production is ε, $1 is possibly out of range. 
+ if syntaxp+1 >= len(syntaxS) { + nyys := make([]syntaxSymType, len(syntaxS)*2) + copy(nyys, syntaxS) + syntaxS = nyys + } + syntaxVAL = syntaxS[syntaxp+1] + + /* consult goto table to find next state */ + syntaxn = syntaxR1[syntaxn] + syntaxg := syntaxPgo[syntaxn] + syntaxj := syntaxg + syntaxS[syntaxp].yys + 1 + + if syntaxj >= syntaxLast { + syntaxstate = syntaxAct[syntaxg] + } else { + syntaxstate = syntaxAct[syntaxj] + if syntaxChk[syntaxstate] != -syntaxn { + syntaxstate = syntaxAct[syntaxg] + } + } + // dummy call; replaced with literal code + switch syntaxnt { + + case 1: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxlex.(*parser).expr = syntaxDollar[1].expr + } + case 2: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.expr = syntaxDollar[1].logExpr + } + case 3: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.expr = syntaxDollar[1].metricExpr + } + case 4: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.logExpr = newMatcherExpr(syntaxDollar[1].matchers) + } + case 5: + syntaxDollar = syntaxS[syntaxpt-2 : syntaxpt+1] + { + syntaxVAL.logExpr = newPipelineExpr(newMatcherExpr(syntaxDollar[1].matchers), syntaxDollar[2].stages) + } + case 6: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.logExpr = syntaxDollar[2].logExpr + } + case 7: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.metricExpr = syntaxDollar[1].metricExpr + } + case 8: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.metricExpr = syntaxDollar[1].metricExpr + } + case 9: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.metricExpr = syntaxDollar[1].metricExpr + } + case 10: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.metricExpr = syntaxDollar[1].literalExpr + } + case 11: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.metricExpr = syntaxDollar[1].metricExpr + } + case 12: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.metricExpr = syntaxDollar[1].metricExpr + } + case 13: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.metricExpr = syntaxDollar[2].metricExpr + } + case 14: + syntaxDollar = syntaxS[syntaxpt-2 : syntaxpt+1] + { + syntaxVAL.logRangeExpr = newLogRange(newMatcherExpr(syntaxDollar[1].matchers), syntaxDollar[2].dur, nil, nil) + } + case 15: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.logRangeExpr = newLogRange(newMatcherExpr(syntaxDollar[1].matchers), syntaxDollar[2].dur, nil, syntaxDollar[3].offsetExpr) + } + case 16: + syntaxDollar = syntaxS[syntaxpt-4 : syntaxpt+1] + { + syntaxVAL.logRangeExpr = newLogRange(newMatcherExpr(syntaxDollar[2].matchers), syntaxDollar[4].dur, nil, nil) + } + case 17: + syntaxDollar = syntaxS[syntaxpt-5 : syntaxpt+1] + { + syntaxVAL.logRangeExpr = newLogRange(newMatcherExpr(syntaxDollar[2].matchers), syntaxDollar[4].dur, nil, syntaxDollar[5].offsetExpr) + } + case 18: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.logRangeExpr = newLogRange(newMatcherExpr(syntaxDollar[1].matchers), syntaxDollar[2].dur, syntaxDollar[3].unwrapExpr, nil) + } + case 19: + syntaxDollar = syntaxS[syntaxpt-4 : syntaxpt+1] + { + syntaxVAL.logRangeExpr = newLogRange(newMatcherExpr(syntaxDollar[1].matchers), syntaxDollar[2].dur, syntaxDollar[4].unwrapExpr, syntaxDollar[3].offsetExpr) + } + case 20: + syntaxDollar = syntaxS[syntaxpt-5 : syntaxpt+1] + { + syntaxVAL.logRangeExpr = newLogRange(newMatcherExpr(syntaxDollar[2].matchers), syntaxDollar[4].dur, 
syntaxDollar[5].unwrapExpr, nil) + } + case 21: + syntaxDollar = syntaxS[syntaxpt-6 : syntaxpt+1] + { + syntaxVAL.logRangeExpr = newLogRange(newMatcherExpr(syntaxDollar[2].matchers), syntaxDollar[4].dur, syntaxDollar[6].unwrapExpr, syntaxDollar[5].offsetExpr) + } + case 22: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.logRangeExpr = newLogRange(newMatcherExpr(syntaxDollar[1].matchers), syntaxDollar[3].dur, syntaxDollar[2].unwrapExpr, nil) + } + case 23: + syntaxDollar = syntaxS[syntaxpt-4 : syntaxpt+1] + { + syntaxVAL.logRangeExpr = newLogRange(newMatcherExpr(syntaxDollar[1].matchers), syntaxDollar[3].dur, syntaxDollar[2].unwrapExpr, syntaxDollar[4].offsetExpr) + } + case 24: + syntaxDollar = syntaxS[syntaxpt-5 : syntaxpt+1] + { + syntaxVAL.logRangeExpr = newLogRange(newMatcherExpr(syntaxDollar[2].matchers), syntaxDollar[5].dur, syntaxDollar[3].unwrapExpr, nil) + } + case 25: + syntaxDollar = syntaxS[syntaxpt-6 : syntaxpt+1] + { + syntaxVAL.logRangeExpr = newLogRange(newMatcherExpr(syntaxDollar[2].matchers), syntaxDollar[5].dur, syntaxDollar[3].unwrapExpr, syntaxDollar[6].offsetExpr) + } + case 26: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.logRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(syntaxDollar[1].matchers), syntaxDollar[2].stages), syntaxDollar[3].dur, nil, nil) + } + case 27: + syntaxDollar = syntaxS[syntaxpt-4 : syntaxpt+1] + { + syntaxVAL.logRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(syntaxDollar[1].matchers), syntaxDollar[2].stages), syntaxDollar[3].dur, nil, syntaxDollar[4].offsetExpr) + } + case 28: + syntaxDollar = syntaxS[syntaxpt-5 : syntaxpt+1] + { + syntaxVAL.logRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(syntaxDollar[2].matchers), syntaxDollar[3].stages), syntaxDollar[5].dur, nil, nil) + } + case 29: + syntaxDollar = syntaxS[syntaxpt-6 : syntaxpt+1] + { + syntaxVAL.logRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(syntaxDollar[2].matchers), syntaxDollar[3].stages), syntaxDollar[5].dur, nil, syntaxDollar[6].offsetExpr) + } + case 30: + syntaxDollar = syntaxS[syntaxpt-4 : syntaxpt+1] + { + syntaxVAL.logRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(syntaxDollar[1].matchers), syntaxDollar[2].stages), syntaxDollar[4].dur, syntaxDollar[3].unwrapExpr, nil) + } + case 31: + syntaxDollar = syntaxS[syntaxpt-5 : syntaxpt+1] + { + syntaxVAL.logRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(syntaxDollar[1].matchers), syntaxDollar[2].stages), syntaxDollar[4].dur, syntaxDollar[3].unwrapExpr, syntaxDollar[5].offsetExpr) + } + case 32: + syntaxDollar = syntaxS[syntaxpt-6 : syntaxpt+1] + { + syntaxVAL.logRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(syntaxDollar[2].matchers), syntaxDollar[3].stages), syntaxDollar[6].dur, syntaxDollar[4].unwrapExpr, nil) + } + case 33: + syntaxDollar = syntaxS[syntaxpt-7 : syntaxpt+1] + { + syntaxVAL.logRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(syntaxDollar[2].matchers), syntaxDollar[3].stages), syntaxDollar[6].dur, syntaxDollar[4].unwrapExpr, syntaxDollar[7].offsetExpr) + } + case 34: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.logRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(syntaxDollar[1].matchers), syntaxDollar[3].stages), syntaxDollar[2].dur, nil, nil) + } + case 35: + syntaxDollar = syntaxS[syntaxpt-4 : syntaxpt+1] + { + syntaxVAL.logRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(syntaxDollar[1].matchers), syntaxDollar[4].stages), syntaxDollar[2].dur, nil, syntaxDollar[3].offsetExpr) + } + 
case 36: + syntaxDollar = syntaxS[syntaxpt-4 : syntaxpt+1] + { + syntaxVAL.logRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(syntaxDollar[1].matchers), syntaxDollar[3].stages), syntaxDollar[2].dur, syntaxDollar[4].unwrapExpr, nil) + } + case 37: + syntaxDollar = syntaxS[syntaxpt-5 : syntaxpt+1] + { + syntaxVAL.logRangeExpr = newLogRange(newPipelineExpr(newMatcherExpr(syntaxDollar[1].matchers), syntaxDollar[4].stages), syntaxDollar[2].dur, syntaxDollar[5].unwrapExpr, syntaxDollar[3].offsetExpr) + } + case 38: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.logRangeExpr = syntaxDollar[2].logRangeExpr + } + case 40: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.unwrapExpr = newUnwrapExpr(syntaxDollar[3].str, "") + } + case 41: + syntaxDollar = syntaxS[syntaxpt-6 : syntaxpt+1] + { + syntaxVAL.unwrapExpr = newUnwrapExpr(syntaxDollar[5].str, syntaxDollar[3].op) + } + case 42: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.unwrapExpr = syntaxDollar[1].unwrapExpr.addPostFilter(syntaxDollar[3].filterer) + } + case 43: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.op = OpConvBytes + } + case 44: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.op = OpConvDuration + } + case 45: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.op = OpConvDurationSeconds + } + case 46: + syntaxDollar = syntaxS[syntaxpt-4 : syntaxpt+1] + { + syntaxVAL.metricExpr = newRangeAggregationExpr(syntaxDollar[3].logRangeExpr, syntaxDollar[1].op, nil, nil) + } + case 47: + syntaxDollar = syntaxS[syntaxpt-6 : syntaxpt+1] + { + syntaxVAL.metricExpr = newRangeAggregationExpr(syntaxDollar[5].logRangeExpr, syntaxDollar[1].op, nil, &syntaxDollar[3].str) + } + case 48: + syntaxDollar = syntaxS[syntaxpt-5 : syntaxpt+1] + { + syntaxVAL.metricExpr = newRangeAggregationExpr(syntaxDollar[3].logRangeExpr, syntaxDollar[1].op, syntaxDollar[5].grouping, nil) + } + case 49: + syntaxDollar = syntaxS[syntaxpt-7 : syntaxpt+1] + { + syntaxVAL.metricExpr = newRangeAggregationExpr(syntaxDollar[5].logRangeExpr, syntaxDollar[1].op, syntaxDollar[7].grouping, &syntaxDollar[3].str) + } + case 50: + syntaxDollar = syntaxS[syntaxpt-4 : syntaxpt+1] + { + syntaxVAL.metricExpr = mustNewVectorAggregationExpr(syntaxDollar[3].metricExpr, syntaxDollar[1].op, nil, nil) + } + case 51: + syntaxDollar = syntaxS[syntaxpt-5 : syntaxpt+1] + { + syntaxVAL.metricExpr = mustNewVectorAggregationExpr(syntaxDollar[4].metricExpr, syntaxDollar[1].op, syntaxDollar[2].grouping, nil) + } + case 52: + syntaxDollar = syntaxS[syntaxpt-5 : syntaxpt+1] + { + syntaxVAL.metricExpr = mustNewVectorAggregationExpr(syntaxDollar[3].metricExpr, syntaxDollar[1].op, syntaxDollar[5].grouping, nil) + } + case 53: + syntaxDollar = syntaxS[syntaxpt-6 : syntaxpt+1] + { + syntaxVAL.metricExpr = mustNewVectorAggregationExpr(syntaxDollar[5].metricExpr, syntaxDollar[1].op, nil, &syntaxDollar[3].str) + } + case 54: + syntaxDollar = syntaxS[syntaxpt-7 : syntaxpt+1] + { + syntaxVAL.metricExpr = mustNewVectorAggregationExpr(syntaxDollar[5].metricExpr, syntaxDollar[1].op, syntaxDollar[7].grouping, &syntaxDollar[3].str) + } + case 55: + syntaxDollar = syntaxS[syntaxpt-7 : syntaxpt+1] + { + syntaxVAL.metricExpr = mustNewVectorAggregationExpr(syntaxDollar[6].metricExpr, syntaxDollar[1].op, syntaxDollar[2].grouping, &syntaxDollar[4].str) + } + case 56: + syntaxDollar = syntaxS[syntaxpt-12 : syntaxpt+1] + { + syntaxVAL.metricExpr = mustNewLabelReplaceExpr(syntaxDollar[3].metricExpr, 
syntaxDollar[5].str, syntaxDollar[7].str, syntaxDollar[9].str, syntaxDollar[11].str) + } + case 57: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.matchers = syntaxDollar[2].matchers + } + case 58: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.matchers = syntaxDollar[2].matchers + } + case 59: + syntaxDollar = syntaxS[syntaxpt-2 : syntaxpt+1] + { + } + case 60: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.matchers = []*labels.Matcher{syntaxDollar[1].matcher} + } + case 61: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.matchers = append(syntaxDollar[1].matchers, syntaxDollar[3].matcher) + } + case 62: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.matcher = mustNewMatcher(labels.MatchEqual, syntaxDollar[1].str, syntaxDollar[3].str) + } + case 63: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.matcher = mustNewMatcher(labels.MatchNotEqual, syntaxDollar[1].str, syntaxDollar[3].str) + } + case 64: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.matcher = mustNewMatcher(labels.MatchRegexp, syntaxDollar[1].str, syntaxDollar[3].str) + } + case 65: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.matcher = mustNewMatcher(labels.MatchNotRegexp, syntaxDollar[1].str, syntaxDollar[3].str) + } + case 66: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.stages = MultiStageExpr{syntaxDollar[1].stage} + } + case 67: + syntaxDollar = syntaxS[syntaxpt-2 : syntaxpt+1] + { + syntaxVAL.stages = append(syntaxDollar[1].stages, syntaxDollar[2].stage) + } + case 68: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.stage = syntaxDollar[1].lineFilterExpr + } + case 69: + syntaxDollar = syntaxS[syntaxpt-2 : syntaxpt+1] + { + syntaxVAL.stage = syntaxDollar[2].stage + } + case 70: + syntaxDollar = syntaxS[syntaxpt-2 : syntaxpt+1] + { + syntaxVAL.stage = syntaxDollar[2].stage + } + case 71: + syntaxDollar = syntaxS[syntaxpt-2 : syntaxpt+1] + { + syntaxVAL.stage = syntaxDollar[2].stage + } + case 72: + syntaxDollar = syntaxS[syntaxpt-2 : syntaxpt+1] + { + syntaxVAL.stage = syntaxDollar[2].stage + } + case 73: + syntaxDollar = syntaxS[syntaxpt-2 : syntaxpt+1] + { + syntaxVAL.stage = &LabelFilterExpr{LabelFilterer: syntaxDollar[2].filterer} + } + case 74: + syntaxDollar = syntaxS[syntaxpt-2 : syntaxpt+1] + { + syntaxVAL.stage = syntaxDollar[2].stage + } + case 75: + syntaxDollar = syntaxS[syntaxpt-2 : syntaxpt+1] + { + syntaxVAL.stage = syntaxDollar[2].stage + } + case 76: + syntaxDollar = syntaxS[syntaxpt-2 : syntaxpt+1] + { + syntaxVAL.stage = syntaxDollar[2].stage + } + case 77: + syntaxDollar = syntaxS[syntaxpt-2 : syntaxpt+1] + { + syntaxVAL.stage = syntaxDollar[2].stage + } + case 78: + syntaxDollar = syntaxS[syntaxpt-2 : syntaxpt+1] + { + syntaxVAL.stage = syntaxDollar[2].stage + } + case 79: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.filter = log.LineMatchRegexp + } + case 80: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.filter = log.LineMatchEqual + } + case 81: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.filter = log.LineMatchPattern + } + case 82: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.filter = log.LineMatchNotRegexp + } + case 83: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.filter = log.LineMatchNotEqual + } + case 84: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.filter = 
log.LineMatchNotPattern + } + case 85: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.op = OpFilterIP + } + case 86: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.lineFilterExpr = newLineFilterExpr(log.LineMatchEqual, "", syntaxDollar[1].str) + } + case 87: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.lineFilterExpr = newOrLineFilterExpr(newLineFilterExpr(log.LineMatchEqual, "", syntaxDollar[1].str), syntaxDollar[3].lineFilterExpr) + } + case 88: + syntaxDollar = syntaxS[syntaxpt-4 : syntaxpt+1] + { + syntaxVAL.lineFilterExpr = newLineFilterExpr(log.LineMatchEqual, syntaxDollar[1].op, syntaxDollar[3].str) + } + case 89: + syntaxDollar = syntaxS[syntaxpt-2 : syntaxpt+1] + { + syntaxVAL.lineFilterExpr = newLineFilterExpr(syntaxDollar[1].filter, "", syntaxDollar[2].str) + } + case 90: + syntaxDollar = syntaxS[syntaxpt-5 : syntaxpt+1] + { + syntaxVAL.lineFilterExpr = newLineFilterExpr(syntaxDollar[1].filter, syntaxDollar[2].op, syntaxDollar[4].str) + } + case 91: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.lineFilterExpr = newOrLineFilterExpr(syntaxDollar[1].lineFilterExpr, syntaxDollar[3].lineFilterExpr) + } + case 92: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.lineFilterExpr = syntaxDollar[1].lineFilterExpr + } + case 93: + syntaxDollar = syntaxS[syntaxpt-2 : syntaxpt+1] + { + syntaxVAL.lineFilterExpr = newNestedLineFilterExpr(syntaxDollar[1].lineFilterExpr, syntaxDollar[2].lineFilterExpr) + } + case 94: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.strs = []string{syntaxDollar[1].str} + } + case 95: + syntaxDollar = syntaxS[syntaxpt-2 : syntaxpt+1] + { + syntaxVAL.strs = append(syntaxDollar[1].strs, syntaxDollar[2].str) + } + case 96: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.stage = newLogfmtParserExpr(nil) + } + case 97: + syntaxDollar = syntaxS[syntaxpt-2 : syntaxpt+1] + { + syntaxVAL.stage = newLogfmtParserExpr(syntaxDollar[2].strs) + } + case 98: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.stage = newLabelParserExpr(OpParserTypeJSON, "") + } + case 99: + syntaxDollar = syntaxS[syntaxpt-2 : syntaxpt+1] + { + syntaxVAL.stage = newLabelParserExpr(OpParserTypeRegexp, syntaxDollar[2].str) + } + case 100: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.stage = newLabelParserExpr(OpParserTypeUnpack, "") + } + case 101: + syntaxDollar = syntaxS[syntaxpt-2 : syntaxpt+1] + { + syntaxVAL.stage = newLabelParserExpr(OpParserTypePattern, syntaxDollar[2].str) + } + case 102: + syntaxDollar = syntaxS[syntaxpt-2 : syntaxpt+1] + { + syntaxVAL.stage = newJSONExpressionParser(syntaxDollar[2].labelExtractionExpressionList) + } + case 103: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.stage = newLogfmtExpressionParser(syntaxDollar[3].labelExtractionExpressionList, syntaxDollar[2].strs) + } + case 104: + syntaxDollar = syntaxS[syntaxpt-2 : syntaxpt+1] + { + syntaxVAL.stage = newLogfmtExpressionParser(syntaxDollar[2].labelExtractionExpressionList, nil) + } + case 105: + syntaxDollar = syntaxS[syntaxpt-2 : syntaxpt+1] + { + syntaxVAL.stage = newLineFmtExpr(syntaxDollar[2].str) + } + case 106: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.stage = newDecolorizeExpr() + } + case 107: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.labelFormat = log.NewRenameLabelFmt(syntaxDollar[1].str, syntaxDollar[3].str) + } + case 108: + syntaxDollar = syntaxS[syntaxpt-3 : 
syntaxpt+1] + { + syntaxVAL.labelFormat = log.NewTemplateLabelFmt(syntaxDollar[1].str, syntaxDollar[3].str) + } + case 109: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.labelsFormat = []log.LabelFmt{syntaxDollar[1].labelFormat} + } + case 110: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.labelsFormat = append(syntaxDollar[1].labelsFormat, syntaxDollar[3].labelFormat) + } + case 112: + syntaxDollar = syntaxS[syntaxpt-2 : syntaxpt+1] + { + syntaxVAL.stage = newLabelFmtExpr(syntaxDollar[2].labelsFormat) + } + case 113: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.filterer = log.NewStringLabelFilter(syntaxDollar[1].matcher) + } + case 114: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.filterer = syntaxDollar[1].filterer + } + case 115: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.filterer = syntaxDollar[1].filterer + } + case 116: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.filterer = syntaxDollar[1].filterer + } + case 117: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.filterer = syntaxDollar[2].filterer + } + case 118: + syntaxDollar = syntaxS[syntaxpt-2 : syntaxpt+1] + { + syntaxVAL.filterer = log.NewAndLabelFilter(syntaxDollar[1].filterer, syntaxDollar[2].filterer) + } + case 119: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.filterer = log.NewAndLabelFilter(syntaxDollar[1].filterer, syntaxDollar[3].filterer) + } + case 120: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.filterer = log.NewAndLabelFilter(syntaxDollar[1].filterer, syntaxDollar[3].filterer) + } + case 121: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.filterer = log.NewOrLabelFilter(syntaxDollar[1].filterer, syntaxDollar[3].filterer) + } + case 122: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.labelExtractionExpression = log.NewLabelExtractionExpr(syntaxDollar[1].str, syntaxDollar[3].str) + } + case 123: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.labelExtractionExpression = log.NewLabelExtractionExpr(syntaxDollar[1].str, syntaxDollar[1].str) + } + case 124: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.labelExtractionExpressionList = []log.LabelExtractionExpr{syntaxDollar[1].labelExtractionExpression} + } + case 125: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.labelExtractionExpressionList = append(syntaxDollar[1].labelExtractionExpressionList, syntaxDollar[3].labelExtractionExpression) + } + case 126: + syntaxDollar = syntaxS[syntaxpt-6 : syntaxpt+1] + { + syntaxVAL.filterer = log.NewIPLabelFilter(syntaxDollar[5].str, syntaxDollar[1].str, log.LabelFilterEqual) + } + case 127: + syntaxDollar = syntaxS[syntaxpt-6 : syntaxpt+1] + { + syntaxVAL.filterer = log.NewIPLabelFilter(syntaxDollar[5].str, syntaxDollar[1].str, log.LabelFilterNotEqual) + } + case 128: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.filterer = syntaxDollar[1].filterer + } + case 129: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.filterer = syntaxDollar[1].filterer + } + case 130: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.filterer = log.NewDurationLabelFilter(log.LabelFilterGreaterThan, syntaxDollar[1].str, syntaxDollar[3].dur) + } + case 131: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.filterer = log.NewDurationLabelFilter(log.LabelFilterGreaterThanOrEqual, syntaxDollar[1].str, 
syntaxDollar[3].dur) + } + case 132: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.filterer = log.NewDurationLabelFilter(log.LabelFilterLesserThan, syntaxDollar[1].str, syntaxDollar[3].dur) + } + case 133: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.filterer = log.NewDurationLabelFilter(log.LabelFilterLesserThanOrEqual, syntaxDollar[1].str, syntaxDollar[3].dur) + } + case 134: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.filterer = log.NewDurationLabelFilter(log.LabelFilterNotEqual, syntaxDollar[1].str, syntaxDollar[3].dur) + } + case 135: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.filterer = log.NewDurationLabelFilter(log.LabelFilterEqual, syntaxDollar[1].str, syntaxDollar[3].dur) + } + case 136: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.filterer = log.NewDurationLabelFilter(log.LabelFilterEqual, syntaxDollar[1].str, syntaxDollar[3].dur) + } + case 137: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.filterer = log.NewBytesLabelFilter(log.LabelFilterGreaterThan, syntaxDollar[1].str, syntaxDollar[3].bytes) + } + case 138: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.filterer = log.NewBytesLabelFilter(log.LabelFilterGreaterThanOrEqual, syntaxDollar[1].str, syntaxDollar[3].bytes) + } + case 139: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.filterer = log.NewBytesLabelFilter(log.LabelFilterLesserThan, syntaxDollar[1].str, syntaxDollar[3].bytes) + } + case 140: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.filterer = log.NewBytesLabelFilter(log.LabelFilterLesserThanOrEqual, syntaxDollar[1].str, syntaxDollar[3].bytes) + } + case 141: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.filterer = log.NewBytesLabelFilter(log.LabelFilterNotEqual, syntaxDollar[1].str, syntaxDollar[3].bytes) + } + case 142: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.filterer = log.NewBytesLabelFilter(log.LabelFilterEqual, syntaxDollar[1].str, syntaxDollar[3].bytes) + } + case 143: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.filterer = log.NewBytesLabelFilter(log.LabelFilterEqual, syntaxDollar[1].str, syntaxDollar[3].bytes) + } + case 144: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.filterer = log.NewNumericLabelFilter(log.LabelFilterGreaterThan, syntaxDollar[1].str, syntaxDollar[3].literalExpr.Val) + } + case 145: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.filterer = log.NewNumericLabelFilter(log.LabelFilterGreaterThanOrEqual, syntaxDollar[1].str, syntaxDollar[3].literalExpr.Val) + } + case 146: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.filterer = log.NewNumericLabelFilter(log.LabelFilterLesserThan, syntaxDollar[1].str, syntaxDollar[3].literalExpr.Val) + } + case 147: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.filterer = log.NewNumericLabelFilter(log.LabelFilterLesserThanOrEqual, syntaxDollar[1].str, syntaxDollar[3].literalExpr.Val) + } + case 148: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.filterer = log.NewNumericLabelFilter(log.LabelFilterNotEqual, syntaxDollar[1].str, syntaxDollar[3].literalExpr.Val) + } + case 149: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.filterer = log.NewNumericLabelFilter(log.LabelFilterEqual, syntaxDollar[1].str, syntaxDollar[3].literalExpr.Val) + } + case 150: + syntaxDollar = syntaxS[syntaxpt-3 : 
syntaxpt+1] + { + syntaxVAL.filterer = log.NewNumericLabelFilter(log.LabelFilterEqual, syntaxDollar[1].str, syntaxDollar[3].literalExpr.Val) + } + case 151: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.namedMatcher = log.NewNamedLabelMatcher(nil, syntaxDollar[1].str) + } + case 152: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.namedMatcher = log.NewNamedLabelMatcher(syntaxDollar[1].matcher, "") + } + case 153: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.namedMatchers = []log.NamedLabelMatcher{syntaxDollar[1].namedMatcher} + } + case 154: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.namedMatchers = append(syntaxDollar[1].namedMatchers, syntaxDollar[3].namedMatcher) + } + case 155: + syntaxDollar = syntaxS[syntaxpt-2 : syntaxpt+1] + { + syntaxVAL.stage = newDropLabelsExpr(syntaxDollar[2].namedMatchers) + } + case 156: + syntaxDollar = syntaxS[syntaxpt-2 : syntaxpt+1] + { + syntaxVAL.stage = newKeepLabelsExpr(syntaxDollar[2].namedMatchers) + } + case 157: + syntaxDollar = syntaxS[syntaxpt-4 : syntaxpt+1] + { + syntaxVAL.metricExpr = mustNewBinOpExpr("or", syntaxDollar[3].binOpts, syntaxDollar[1].expr, syntaxDollar[4].expr) + } + case 158: + syntaxDollar = syntaxS[syntaxpt-4 : syntaxpt+1] + { + syntaxVAL.metricExpr = mustNewBinOpExpr("and", syntaxDollar[3].binOpts, syntaxDollar[1].expr, syntaxDollar[4].expr) + } + case 159: + syntaxDollar = syntaxS[syntaxpt-4 : syntaxpt+1] + { + syntaxVAL.metricExpr = mustNewBinOpExpr("unless", syntaxDollar[3].binOpts, syntaxDollar[1].expr, syntaxDollar[4].expr) + } + case 160: + syntaxDollar = syntaxS[syntaxpt-4 : syntaxpt+1] + { + syntaxVAL.metricExpr = mustNewBinOpExpr("+", syntaxDollar[3].binOpts, syntaxDollar[1].expr, syntaxDollar[4].expr) + } + case 161: + syntaxDollar = syntaxS[syntaxpt-4 : syntaxpt+1] + { + syntaxVAL.metricExpr = mustNewBinOpExpr("-", syntaxDollar[3].binOpts, syntaxDollar[1].expr, syntaxDollar[4].expr) + } + case 162: + syntaxDollar = syntaxS[syntaxpt-4 : syntaxpt+1] + { + syntaxVAL.metricExpr = mustNewBinOpExpr("*", syntaxDollar[3].binOpts, syntaxDollar[1].expr, syntaxDollar[4].expr) + } + case 163: + syntaxDollar = syntaxS[syntaxpt-4 : syntaxpt+1] + { + syntaxVAL.metricExpr = mustNewBinOpExpr("/", syntaxDollar[3].binOpts, syntaxDollar[1].expr, syntaxDollar[4].expr) + } + case 164: + syntaxDollar = syntaxS[syntaxpt-4 : syntaxpt+1] + { + syntaxVAL.metricExpr = mustNewBinOpExpr("%", syntaxDollar[3].binOpts, syntaxDollar[1].expr, syntaxDollar[4].expr) + } + case 165: + syntaxDollar = syntaxS[syntaxpt-4 : syntaxpt+1] + { + syntaxVAL.metricExpr = mustNewBinOpExpr("^", syntaxDollar[3].binOpts, syntaxDollar[1].expr, syntaxDollar[4].expr) + } + case 166: + syntaxDollar = syntaxS[syntaxpt-4 : syntaxpt+1] + { + syntaxVAL.metricExpr = mustNewBinOpExpr("==", syntaxDollar[3].binOpts, syntaxDollar[1].expr, syntaxDollar[4].expr) + } + case 167: + syntaxDollar = syntaxS[syntaxpt-4 : syntaxpt+1] + { + syntaxVAL.metricExpr = mustNewBinOpExpr("!=", syntaxDollar[3].binOpts, syntaxDollar[1].expr, syntaxDollar[4].expr) + } + case 168: + syntaxDollar = syntaxS[syntaxpt-4 : syntaxpt+1] + { + syntaxVAL.metricExpr = mustNewBinOpExpr(">", syntaxDollar[3].binOpts, syntaxDollar[1].expr, syntaxDollar[4].expr) + } + case 169: + syntaxDollar = syntaxS[syntaxpt-4 : syntaxpt+1] + { + syntaxVAL.metricExpr = mustNewBinOpExpr(">=", syntaxDollar[3].binOpts, syntaxDollar[1].expr, syntaxDollar[4].expr) + } + case 170: + syntaxDollar = syntaxS[syntaxpt-4 : syntaxpt+1] + { + 
syntaxVAL.metricExpr = mustNewBinOpExpr("<", syntaxDollar[3].binOpts, syntaxDollar[1].expr, syntaxDollar[4].expr) + } + case 171: + syntaxDollar = syntaxS[syntaxpt-4 : syntaxpt+1] + { + syntaxVAL.metricExpr = mustNewBinOpExpr("<=", syntaxDollar[3].binOpts, syntaxDollar[1].expr, syntaxDollar[4].expr) + } + case 172: + syntaxDollar = syntaxS[syntaxpt-0 : syntaxpt+1] + { + syntaxVAL.binOpts = &BinOpOptions{VectorMatching: &VectorMatching{Card: CardOneToOne}} + } + case 173: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.binOpts = &BinOpOptions{VectorMatching: &VectorMatching{Card: CardOneToOne}, ReturnBool: true} + } + case 174: + syntaxDollar = syntaxS[syntaxpt-5 : syntaxpt+1] + { + syntaxVAL.binOpts = syntaxDollar[1].binOpts + syntaxVAL.binOpts.VectorMatching.On = true + syntaxVAL.binOpts.VectorMatching.MatchingLabels = syntaxDollar[4].strs + } + case 175: + syntaxDollar = syntaxS[syntaxpt-4 : syntaxpt+1] + { + syntaxVAL.binOpts = syntaxDollar[1].binOpts + syntaxVAL.binOpts.VectorMatching.On = true + } + case 176: + syntaxDollar = syntaxS[syntaxpt-5 : syntaxpt+1] + { + syntaxVAL.binOpts = syntaxDollar[1].binOpts + syntaxVAL.binOpts.VectorMatching.MatchingLabels = syntaxDollar[4].strs + } + case 177: + syntaxDollar = syntaxS[syntaxpt-4 : syntaxpt+1] + { + syntaxVAL.binOpts = syntaxDollar[1].binOpts + } + case 178: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.binOpts = syntaxDollar[1].binOpts + } + case 179: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.binOpts = syntaxDollar[1].binOpts + } + case 180: + syntaxDollar = syntaxS[syntaxpt-2 : syntaxpt+1] + { + syntaxVAL.binOpts = syntaxDollar[1].binOpts + syntaxVAL.binOpts.VectorMatching.Card = CardManyToOne + } + case 181: + syntaxDollar = syntaxS[syntaxpt-4 : syntaxpt+1] + { + syntaxVAL.binOpts = syntaxDollar[1].binOpts + syntaxVAL.binOpts.VectorMatching.Card = CardManyToOne + } + case 182: + syntaxDollar = syntaxS[syntaxpt-5 : syntaxpt+1] + { + syntaxVAL.binOpts = syntaxDollar[1].binOpts + syntaxVAL.binOpts.VectorMatching.Card = CardManyToOne + syntaxVAL.binOpts.VectorMatching.Include = syntaxDollar[4].strs + } + case 183: + syntaxDollar = syntaxS[syntaxpt-2 : syntaxpt+1] + { + syntaxVAL.binOpts = syntaxDollar[1].binOpts + syntaxVAL.binOpts.VectorMatching.Card = CardOneToMany + } + case 184: + syntaxDollar = syntaxS[syntaxpt-4 : syntaxpt+1] + { + syntaxVAL.binOpts = syntaxDollar[1].binOpts + syntaxVAL.binOpts.VectorMatching.Card = CardOneToMany + } + case 185: + syntaxDollar = syntaxS[syntaxpt-5 : syntaxpt+1] + { + syntaxVAL.binOpts = syntaxDollar[1].binOpts + syntaxVAL.binOpts.VectorMatching.Card = CardOneToMany + syntaxVAL.binOpts.VectorMatching.Include = syntaxDollar[4].strs + } + case 186: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.literalExpr = mustNewLiteralExpr(syntaxDollar[1].str, false) + } + case 187: + syntaxDollar = syntaxS[syntaxpt-2 : syntaxpt+1] + { + syntaxVAL.literalExpr = mustNewLiteralExpr(syntaxDollar[2].str, false) + } + case 188: + syntaxDollar = syntaxS[syntaxpt-2 : syntaxpt+1] + { + syntaxVAL.literalExpr = mustNewLiteralExpr(syntaxDollar[2].str, true) + } + case 189: + syntaxDollar = syntaxS[syntaxpt-4 : syntaxpt+1] + { + syntaxVAL.metricExpr = NewVectorExpr(syntaxDollar[3].str) + } + case 190: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.str = OpTypeVector + } + case 191: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.op = OpTypeSum + } + case 192: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] 
+ { + syntaxVAL.op = OpTypeAvg + } + case 193: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.op = OpTypeCount + } + case 194: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.op = OpTypeMax + } + case 195: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.op = OpTypeMin + } + case 196: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.op = OpTypeStddev + } + case 197: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.op = OpTypeStdvar + } + case 198: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.op = OpTypeBottomK + } + case 199: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.op = OpTypeTopK + } + case 200: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.op = OpTypeSort + } + case 201: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.op = OpTypeSortDesc + } + case 202: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.op = OpTypeApproxTopK + } + case 203: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.op = OpRangeTypeCount + } + case 204: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.op = OpRangeTypeRate + } + case 205: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.op = OpRangeTypeRateCounter + } + case 206: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.op = OpRangeTypeBytes + } + case 207: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.op = OpRangeTypeBytesRate + } + case 208: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.op = OpRangeTypeAvg + } + case 209: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.op = OpRangeTypeSum + } + case 210: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.op = OpRangeTypeMin + } + case 211: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.op = OpRangeTypeMax + } + case 212: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.op = OpRangeTypeStdvar + } + case 213: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.op = OpRangeTypeStddev + } + case 214: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.op = OpRangeTypeQuantile + } + case 215: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.op = OpRangeTypeFirst + } + case 216: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.op = OpRangeTypeLast + } + case 217: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.op = OpRangeTypeAbsent + } + case 218: + syntaxDollar = syntaxS[syntaxpt-2 : syntaxpt+1] + { + syntaxVAL.offsetExpr = newOffsetExpr(syntaxDollar[2].dur) + } + case 219: + syntaxDollar = syntaxS[syntaxpt-1 : syntaxpt+1] + { + syntaxVAL.strs = []string{syntaxDollar[1].str} + } + case 220: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.strs = append(syntaxDollar[1].strs, syntaxDollar[3].str) + } + case 221: + syntaxDollar = syntaxS[syntaxpt-4 : syntaxpt+1] + { + syntaxVAL.grouping = &Grouping{Without: false, Groups: syntaxDollar[3].strs} + } + case 222: + syntaxDollar = syntaxS[syntaxpt-4 : syntaxpt+1] + { + syntaxVAL.grouping = &Grouping{Without: true, Groups: syntaxDollar[3].strs} + } + case 223: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.grouping = &Grouping{Without: false, Groups: nil} + } + case 224: + syntaxDollar = syntaxS[syntaxpt-3 : syntaxpt+1] + { + syntaxVAL.grouping = &Grouping{Without: true, Groups: nil} + } + } + 
goto syntaxstack /* stack new state and value */ +} diff --git a/pkg/logql/syntax/test_utils.go b/pkg/logql/syntax/test_utils.go index 2083cec0ac43b..c49881acac45f 100644 --- a/pkg/logql/syntax/test_utils.go +++ b/pkg/logql/syntax/test_utils.go @@ -44,7 +44,7 @@ func removeFastRegexMatcherFromExpr(expr Expr) Expr { typed.Mts = RemoveFastRegexMatchers(typed.Mts) case *LabelFilterExpr: typed.LabelFilterer = removeFastRegexMatcherFromLabelFilterer(typed.LabelFilterer) - case *LogRange: + case *LogRangeExpr: if typed.Unwrap == nil { return } diff --git a/pkg/logql/syntax/visit.go b/pkg/logql/syntax/visit.go index 968c5b53b01b5..818b7bd0cecfb 100644 --- a/pkg/logql/syntax/visit.go +++ b/pkg/logql/syntax/visit.go @@ -9,7 +9,7 @@ type RootVisitor interface { LogSelectorExprVisitor StageExprVisitor - VisitLogRange(*LogRange) + VisitLogRange(*LogRangeExpr) } type SampleExprVisitor interface { @@ -31,14 +31,14 @@ type LogSelectorExprVisitor interface { type StageExprVisitor interface { VisitDecolorize(*DecolorizeExpr) VisitDropLabels(*DropLabelsExpr) - VisitJSONExpressionParser(*JSONExpressionParser) + VisitJSONExpressionParser(*JSONExpressionParserExpr) VisitKeepLabel(*KeepLabelsExpr) VisitLabelFilter(*LabelFilterExpr) VisitLabelFmt(*LabelFmtExpr) - VisitLabelParser(*LabelParserExpr) + VisitLabelParser(*LineParserExpr) VisitLineFilter(*LineFilterExpr) VisitLineFmt(*LineFmtExpr) - VisitLogfmtExpressionParser(*LogfmtExpressionParser) + VisitLogfmtExpressionParser(*LogfmtExpressionParserExpr) VisitLogfmtParser(*LogfmtParserExpr) } @@ -48,17 +48,17 @@ type DepthFirstTraversal struct { VisitBinOpFn func(v RootVisitor, e *BinOpExpr) VisitDecolorizeFn func(v RootVisitor, e *DecolorizeExpr) VisitDropLabelsFn func(v RootVisitor, e *DropLabelsExpr) - VisitJSONExpressionParserFn func(v RootVisitor, e *JSONExpressionParser) + VisitJSONExpressionParserFn func(v RootVisitor, e *JSONExpressionParserExpr) VisitKeepLabelFn func(v RootVisitor, e *KeepLabelsExpr) VisitLabelFilterFn func(v RootVisitor, e *LabelFilterExpr) VisitLabelFmtFn func(v RootVisitor, e *LabelFmtExpr) - VisitLabelParserFn func(v RootVisitor, e *LabelParserExpr) + VisitLabelParserFn func(v RootVisitor, e *LineParserExpr) VisitLabelReplaceFn func(v RootVisitor, e *LabelReplaceExpr) VisitLineFilterFn func(v RootVisitor, e *LineFilterExpr) VisitLineFmtFn func(v RootVisitor, e *LineFmtExpr) VisitLiteralFn func(v RootVisitor, e *LiteralExpr) - VisitLogRangeFn func(v RootVisitor, e *LogRange) - VisitLogfmtExpressionParserFn func(v RootVisitor, e *LogfmtExpressionParser) + VisitLogRangeFn func(v RootVisitor, e *LogRangeExpr) + VisitLogfmtExpressionParserFn func(v RootVisitor, e *LogfmtExpressionParserExpr) VisitLogfmtParserFn func(v RootVisitor, e *LogfmtParserExpr) VisitMatchersFn func(v RootVisitor, e *MatchersExpr) VisitPipelineFn func(v RootVisitor, e *PipelineExpr) @@ -101,7 +101,7 @@ func (v *DepthFirstTraversal) VisitDropLabels(e *DropLabelsExpr) { } // VisitJSONExpressionParser implements RootVisitor. -func (v *DepthFirstTraversal) VisitJSONExpressionParser(e *JSONExpressionParser) { +func (v *DepthFirstTraversal) VisitJSONExpressionParser(e *JSONExpressionParserExpr) { if e == nil { return } @@ -141,7 +141,7 @@ func (v *DepthFirstTraversal) VisitLabelFmt(e *LabelFmtExpr) { } // VisitLabelParser implements RootVisitor. 
-func (v *DepthFirstTraversal) VisitLabelParser(e *LabelParserExpr) { +func (v *DepthFirstTraversal) VisitLabelParser(e *LineParserExpr) { if e == nil { return } @@ -198,7 +198,7 @@ func (v *DepthFirstTraversal) VisitLiteral(e *LiteralExpr) { } // VisitLogRange implements RootVisitor. -func (v *DepthFirstTraversal) VisitLogRange(e *LogRange) { +func (v *DepthFirstTraversal) VisitLogRange(e *LogRangeExpr) { if e == nil { return } @@ -210,7 +210,7 @@ func (v *DepthFirstTraversal) VisitLogRange(e *LogRange) { } // VisitLogfmtExpressionParser implements RootVisitor. -func (v *DepthFirstTraversal) VisitLogfmtExpressionParser(e *LogfmtExpressionParser) { +func (v *DepthFirstTraversal) VisitLogfmtExpressionParser(e *LogfmtExpressionParserExpr) { if e == nil { return } diff --git a/pkg/logql/syntax/visit_test.go b/pkg/logql/syntax/visit_test.go index 445f165d9057a..fbdb5d4387dc7 100644 --- a/pkg/logql/syntax/visit_test.go +++ b/pkg/logql/syntax/visit_test.go @@ -12,7 +12,7 @@ func TestDepthFirstTraversalVisitor(t *testing.T) { visited := [][2]string{} visitor := &DepthFirstTraversal{ - VisitLabelParserFn: func(_ RootVisitor, e *LabelParserExpr) { + VisitLabelParserFn: func(_ RootVisitor, e *LineParserExpr) { visited = append(visited, [2]string{fmt.Sprintf("%T", e), e.String()}) }, VisitLineFilterFn: func(_ RootVisitor, e *LineFilterExpr) { @@ -33,7 +33,7 @@ func TestDepthFirstTraversalVisitor(t *testing.T) { {"*syntax.LogfmtParserExpr", `| logfmt`}, {"*syntax.MatchersExpr", `{env="dev"}`}, {"*syntax.LineFilterExpr", `|~ "(foo|bar)"`}, - {"*syntax.LabelParserExpr", `| json`}, + {"*syntax.LineParserExpr", `| json`}, } query := ` diff --git a/pkg/querier/queryrange/split_by_interval.go b/pkg/querier/queryrange/split_by_interval.go index 701a045270d0b..29b3a3d2e8653 100644 --- a/pkg/querier/queryrange/split_by_interval.go +++ b/pkg/querier/queryrange/split_by_interval.go @@ -271,7 +271,7 @@ func maxRangeVectorAndOffsetDuration(expr syntax.Expr) (time.Duration, time.Dura var maxRVDuration, maxOffset time.Duration expr.Walk(func(e syntax.Expr) { - if r, ok := e.(*syntax.LogRange); ok { + if r, ok := e.(*syntax.LogRangeExpr); ok { if r.Interval > maxRVDuration { maxRVDuration = r.Interval } From 0baa6a709cc829c39c84b6cd5f6b476383696469 Mon Sep 17 00:00:00 2001 From: Christian Haudum Date: Wed, 5 Feb 2025 10:39:29 +0100 Subject: [PATCH 27/33] chore: Update Golang in CI and build env to 1.23.6 (#16098) Signed-off-by: Christian Haudum --- .github/workflows/check.yml | 2 +- .github/workflows/images.yml | 10 +++++----- .github/workflows/minor-release-pr.yml | 6 +++--- .github/workflows/patch-release-pr.yml | 6 +++--- Makefile | 5 +++-- loki-build-image/README.md | 4 ++++ tools/lambda-promtail/Dockerfile | 2 +- 7 files changed, 20 insertions(+), 15 deletions(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 4ae4ec778dc6a..dffe7a56b341a 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -2,7 +2,7 @@ "check": "uses": "grafana/loki-release/.github/workflows/check.yml@main" "with": - "build_image": "grafana/loki-build-image:0.34.4" + "build_image": "grafana/loki-build-image:0.34.5" "golang_ci_lint_version": "v1.60.3" "release_lib_ref": "main" "skip_validation": false diff --git a/.github/workflows/images.yml b/.github/workflows/images.yml index 8ba365d6e9696..c0a0c3e8f16f8 100644 --- a/.github/workflows/images.yml +++ b/.github/workflows/images.yml @@ -2,7 +2,7 @@ "check": "uses": "grafana/loki-release/.github/workflows/check.yml@main" "with": - 
"build_image": "grafana/loki-build-image:0.34.4" + "build_image": "grafana/loki-build-image:0.34.5" "golang_ci_lint_version": "v1.60.3" "release_lib_ref": "main" "skip_validation": false @@ -10,7 +10,7 @@ "loki-canary-boringcrypto-image": "env": "BUILD_TIMEOUT": 60 - "GO_VERSION": "1.23.5" + "GO_VERSION": "1.23.6" "IMAGE_PREFIX": "grafana" "RELEASE_LIB_REF": "main" "RELEASE_REPO": "grafana/loki" @@ -118,7 +118,7 @@ "loki-canary-image": "env": "BUILD_TIMEOUT": 60 - "GO_VERSION": "1.23.5" + "GO_VERSION": "1.23.6" "IMAGE_PREFIX": "grafana" "RELEASE_LIB_REF": "main" "RELEASE_REPO": "grafana/loki" @@ -226,7 +226,7 @@ "loki-image": "env": "BUILD_TIMEOUT": 60 - "GO_VERSION": "1.23.5" + "GO_VERSION": "1.23.6" "IMAGE_PREFIX": "grafana" "RELEASE_LIB_REF": "main" "RELEASE_REPO": "grafana/loki" @@ -334,7 +334,7 @@ "promtail-image": "env": "BUILD_TIMEOUT": 60 - "GO_VERSION": "1.23.5" + "GO_VERSION": "1.23.6" "IMAGE_PREFIX": "grafana" "RELEASE_LIB_REF": "main" "RELEASE_REPO": "grafana/loki" diff --git a/.github/workflows/minor-release-pr.yml b/.github/workflows/minor-release-pr.yml index 80d0bccefedb7..7a15d1367f51b 100644 --- a/.github/workflows/minor-release-pr.yml +++ b/.github/workflows/minor-release-pr.yml @@ -16,7 +16,7 @@ jobs: check: uses: "grafana/loki-release/.github/workflows/check.yml@main" with: - build_image: "grafana/loki-build-image:0.34.4" + build_image: "grafana/loki-build-image:0.34.5" golang_ci_lint_version: "v1.60.3" release_lib_ref: "main" skip_validation: false @@ -144,7 +144,7 @@ jobs: --env SKIP_ARM \ --volume .:/src/loki \ --workdir /src/loki \ - --entrypoint /bin/sh "grafana/loki-build-image:0.34.4" + --entrypoint /bin/sh "grafana/loki-build-image:0.34.5" git config --global --add safe.directory /src/loki echo "${NFPM_SIGNING_KEY}" > $NFPM_SIGNING_KEY_FILE make dist packages @@ -666,7 +666,7 @@ jobs: build-args: | IMAGE_TAG=${{ needs.version.outputs.version }} GOARCH=${{ steps.platform.outputs.platform_short }} - BUILD_IMAGE=grafana/loki-build-image:0.34.4 + BUILD_IMAGE=grafana/loki-build-image:0.34.5 context: "release" file: "release/clients/cmd/docker-driver/Dockerfile" outputs: "type=local,dest=release/plugins/loki-docker-driver-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}" diff --git a/.github/workflows/patch-release-pr.yml b/.github/workflows/patch-release-pr.yml index cc601fc88d044..a3026c86cc3b0 100644 --- a/.github/workflows/patch-release-pr.yml +++ b/.github/workflows/patch-release-pr.yml @@ -16,7 +16,7 @@ jobs: check: uses: "grafana/loki-release/.github/workflows/check.yml@main" with: - build_image: "grafana/loki-build-image:0.34.4" + build_image: "grafana/loki-build-image:0.34.5" golang_ci_lint_version: "v1.60.3" release_lib_ref: "main" skip_validation: false @@ -144,7 +144,7 @@ jobs: --env SKIP_ARM \ --volume .:/src/loki \ --workdir /src/loki \ - --entrypoint /bin/sh "grafana/loki-build-image:0.34.4" + --entrypoint /bin/sh "grafana/loki-build-image:0.34.5" git config --global --add safe.directory /src/loki echo "${NFPM_SIGNING_KEY}" > $NFPM_SIGNING_KEY_FILE make dist packages @@ -666,7 +666,7 @@ jobs: build-args: | IMAGE_TAG=${{ needs.version.outputs.version }} GOARCH=${{ steps.platform.outputs.platform_short }} - BUILD_IMAGE=grafana/loki-build-image:0.34.4 + BUILD_IMAGE=grafana/loki-build-image:0.34.5 context: "release" file: "release/clients/cmd/docker-driver/Dockerfile" outputs: "type=local,dest=release/plugins/loki-docker-driver-${{ needs.version.outputs.version}}-${{ steps.platform.outputs.platform }}" diff --git a/Makefile 
b/Makefile index 4f836262e7bfa..26ec3803279b4 100644 --- a/Makefile +++ b/Makefile @@ -18,8 +18,9 @@ BUILD_IN_CONTAINER ?= true CI ?= false # Ensure you run `make release-workflows` after changing this -GO_VERSION := 1.23.5 -BUILD_IMAGE_TAG := 0.34.4 +GO_VERSION := 1.23.6 +# Ensure you run `make IMAGE_TAG= build-image-push` after changing this +BUILD_IMAGE_TAG := 0.34.5 IMAGE_TAG ?= $(shell ./tools/image-tag) GIT_REVISION := $(shell git rev-parse --short HEAD) diff --git a/loki-build-image/README.md b/loki-build-image/README.md index 1a03c4700bca0..95cb72b3846ef 100644 --- a/loki-build-image/README.md +++ b/loki-build-image/README.md @@ -2,6 +2,10 @@ ## Versions +### 0.34.5 + +- Update to Go 1.23.6 + ### 0.34.4 - Update to Go 1.23.5 diff --git a/tools/lambda-promtail/Dockerfile b/tools/lambda-promtail/Dockerfile index a4dc4c4371af6..d59fdf59cfb8c 100644 --- a/tools/lambda-promtail/Dockerfile +++ b/tools/lambda-promtail/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22-alpine AS build-image +FROM golang:1.23-alpine AS build-image COPY tools/lambda-promtail /src/lambda-promtail WORKDIR /src/lambda-promtail From 2587f3425ddb055517444055bcae73cb69510399 Mon Sep 17 00:00:00 2001 From: Cyril Tovena Date: Wed, 5 Feb 2025 11:27:42 +0100 Subject: [PATCH 28/33] feat(lambda-promtail): Improve relabel configuration parsing and testing (#16100) --- tools/lambda-promtail/lambda-promtail/main.go | 14 +- .../lambda-promtail/relabel.go | 123 ++++++++++++++++++ .../lambda-promtail/relabel_test.go | 121 +++++++++++++++++ 3 files changed, 250 insertions(+), 8 deletions(-) create mode 100644 tools/lambda-promtail/lambda-promtail/relabel.go create mode 100644 tools/lambda-promtail/lambda-promtail/relabel_test.go diff --git a/tools/lambda-promtail/lambda-promtail/main.go b/tools/lambda-promtail/lambda-promtail/main.go index c462773869458..9c27491462f89 100644 --- a/tools/lambda-promtail/lambda-promtail/main.go +++ b/tools/lambda-promtail/lambda-promtail/main.go @@ -103,19 +103,17 @@ func setupArguments() { batchSize, _ = strconv.Atoi(batch) } - print := os.Getenv("PRINT_LOG_LINE") printLogLine = true - if strings.EqualFold(print, "false") { + if strings.EqualFold(os.Getenv("PRINT_LOG_LINE"), "false") { printLogLine = false } s3Clients = make(map[string]*s3.Client) - // Parse relabel configs from environment variable - if relabelConfigsRaw := os.Getenv("RELABEL_CONFIGS"); relabelConfigsRaw != "" { - if err := json.Unmarshal([]byte(relabelConfigsRaw), &relabelConfigs); err != nil { - panic(fmt.Errorf("failed to parse RELABEL_CONFIGS: %v", err)) - } + promConfigs, err := parseRelabelConfigs(os.Getenv("RELABEL_CONFIGS")) + if err != nil { + panic(err) } + relabelConfigs = promConfigs } func parseExtraLabels(extraLabelsRaw string, omitPrefix bool) (model.LabelSet, error) { @@ -131,7 +129,7 @@ func parseExtraLabels(extraLabelsRaw string, omitPrefix bool) (model.LabelSet, e } if len(extraLabelsSplit)%2 != 0 { - return nil, fmt.Errorf(invalidExtraLabelsError) + return nil, errors.New(invalidExtraLabelsError) } for i := 0; i < len(extraLabelsSplit); i += 2 { extractedLabels[model.LabelName(prefix+extraLabelsSplit[i])] = model.LabelValue(extraLabelsSplit[i+1]) diff --git a/tools/lambda-promtail/lambda-promtail/relabel.go b/tools/lambda-promtail/lambda-promtail/relabel.go new file mode 100644 index 0000000000000..7368ca4c6820f --- /dev/null +++ b/tools/lambda-promtail/lambda-promtail/relabel.go @@ -0,0 +1,123 @@ +package main + +import ( + "encoding/json" + "fmt" + + "github.com/prometheus/common/model" + 
"github.com/prometheus/prometheus/model/relabel" +) + +// copy and modification of github.com/prometheus/prometheus/model/relabel/relabel.go +// reason: the custom types in github.com/prometheus/prometheus/model/relabel/relabel.go are difficult to unmarshal +type RelabelConfig struct { + // A list of labels from which values are taken and concatenated + // with the configured separator in order. + SourceLabels []string `json:"source_labels,omitempty"` + // Separator is the string between concatenated values from the source labels. + Separator string `json:"separator,omitempty"` + // Regex against which the concatenation is matched. + Regex string `json:"regex,omitempty"` + // Modulus to take of the hash of concatenated values from the source labels. + Modulus uint64 `json:"modulus,omitempty"` + // TargetLabel is the label to which the resulting string is written in a replacement. + // Regexp interpolation is allowed for the replace action. + TargetLabel string `json:"target_label,omitempty"` + // Replacement is the regex replacement pattern to be used. + Replacement string `json:"replacement,omitempty"` + // Action is the action to be performed for the relabeling. + Action string `json:"action,omitempty"` +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (rc *RelabelConfig) UnmarshalJSON(data []byte) error { + *rc = RelabelConfig{ + Action: string(relabel.Replace), + Separator: ";", + Regex: "(.*)", + Replacement: "$1", + } + type plain RelabelConfig + if err := json.Unmarshal(data, (*plain)(rc)); err != nil { + return err + } + return nil +} + +// ToPrometheusConfig converts our JSON-friendly RelabelConfig to the Prometheus RelabelConfig +func (rc *RelabelConfig) ToPrometheusConfig() (*relabel.Config, error) { + var regex relabel.Regexp + if rc.Regex != "" { + var err error + regex, err = relabel.NewRegexp(rc.Regex) + if err != nil { + return nil, fmt.Errorf("invalid regex %q: %w", rc.Regex, err) + } + } else { + regex = relabel.DefaultRelabelConfig.Regex + } + + action := relabel.Action(rc.Action) + if rc.Action == "" { + action = relabel.DefaultRelabelConfig.Action + } + + separator := rc.Separator + if separator == "" { + separator = relabel.DefaultRelabelConfig.Separator + } + + replacement := rc.Replacement + if replacement == "" { + replacement = relabel.DefaultRelabelConfig.Replacement + } + + sourceLabels := make(model.LabelNames, 0, len(rc.SourceLabels)) + for _, l := range rc.SourceLabels { + sourceLabels = append(sourceLabels, model.LabelName(l)) + } + + cfg := &relabel.Config{ + SourceLabels: sourceLabels, + Separator: separator, + Regex: regex, + Modulus: rc.Modulus, + TargetLabel: rc.TargetLabel, + Replacement: replacement, + Action: action, + } + + if err := cfg.Validate(); err != nil { + return nil, fmt.Errorf("invalid relabel config: %w", err) + } + return cfg, nil +} + +func ToPrometheusConfigs(cfgs []*RelabelConfig) ([]*relabel.Config, error) { + promConfigs := make([]*relabel.Config, 0, len(cfgs)) + for _, cfg := range cfgs { + promCfg, err := cfg.ToPrometheusConfig() + if err != nil { + return nil, fmt.Errorf("invalid relabel config: %w", err) + } + promConfigs = append(promConfigs, promCfg) + } + return promConfigs, nil +} + +func parseRelabelConfigs(relabelConfigsRaw string) ([]*relabel.Config, error) { + if relabelConfigsRaw == "" { + return nil, nil + } + + var relabelConfigs []*RelabelConfig + + if err := json.Unmarshal([]byte(relabelConfigsRaw), &relabelConfigs); err != nil { + return nil, fmt.Errorf("failed to parse RELABEL_CONFIGS: %v", 
err) + } + promConfigs, err := ToPrometheusConfigs(relabelConfigs) + if err != nil { + return nil, fmt.Errorf("failed to parse RELABEL_CONFIGS: %v", err) + } + return promConfigs, nil +} diff --git a/tools/lambda-promtail/lambda-promtail/relabel_test.go b/tools/lambda-promtail/lambda-promtail/relabel_test.go new file mode 100644 index 0000000000000..cb4cbebe45921 --- /dev/null +++ b/tools/lambda-promtail/lambda-promtail/relabel_test.go @@ -0,0 +1,121 @@ +package main + +import ( + "testing" + + "github.com/prometheus/prometheus/model/relabel" + "github.com/stretchr/testify/require" + + "github.com/grafana/regexp" +) + +func TestParseRelabelConfigs(t *testing.T) { + tests := []struct { + name string + input string + want []*relabel.Config + wantErr bool + }{ + { + name: "empty input", + input: "", + want: nil, + wantErr: false, + }, + { + name: "default config", + input: `[{"target_label": "new_label"}]`, + want: []*relabel.Config{ + { + TargetLabel: "new_label", + Action: relabel.Replace, + Regex: relabel.Regexp{Regexp: regexp.MustCompile("(.*)")}, + Replacement: "$1", + }, + }, + wantErr: false, + }, + { + name: "invalid JSON", + input: "invalid json", + wantErr: true, + }, + { + name: "valid single config", + input: `[{ + "source_labels": ["__name__"], + "regex": "my_metric_.*", + "target_label": "new_label", + "replacement": "foo", + "action": "replace" + }]`, + wantErr: false, + }, + { + name: "invalid regex", + input: `[{ + "source_labels": ["__name__"], + "regex": "[[invalid regex", + "target_label": "new_label", + "action": "replace" + }]`, + wantErr: true, + }, + { + name: "multiple valid configs", + input: `[ + { + "source_labels": ["__name__"], + "regex": "my_metric_.*", + "target_label": "new_label", + "replacement": "foo", + "action": "replace" + }, + { + "source_labels": ["label1", "label2"], + "separator": ";", + "regex": "val1;val2", + "target_label": "combined", + "action": "replace" + } + ]`, + wantErr: false, + }, + { + name: "invalid action", + input: `[{ + "source_labels": ["__name__"], + "regex": "my_metric_.*", + "target_label": "new_label", + "action": "invalid_action" + }]`, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := parseRelabelConfigs(tt.input) + if tt.wantErr { + require.Error(t, err) + return + } + require.NoError(t, err) + + if tt.input == "" { + require.Nil(t, got) + return + } + + require.NotNil(t, got) + // For valid configs, verify they can be used for relabeling + // This implicitly tests that the conversion was successful + if len(got) > 0 { + for _, cfg := range got { + require.NotNil(t, cfg) + require.NotEmpty(t, cfg.Action) + } + } + }) + } +} From 5c8e8322605c76d569b4af95c00dc9b0fa390062 Mon Sep 17 00:00:00 2001 From: Dylan Guedes Date: Wed, 5 Feb 2025 07:32:25 -0300 Subject: [PATCH 29/33] feat: Introduce policy stream mapping (#15982) **What this PR does / why we need it**: Introduces the idea of policies to Loki, which are recognizable based on the given stream selectors. This is an improved version of https://github.com/grafana/loki/pull/15561 and built on top of https://github.com/grafana/loki/pull/15875. 
A policy mapping can be configured the following way: ```yaml 12345: policy_stream_mapping: policy6: - selector: `{env="prod"}` priority: 2 - selector: `{env=~"prod|staging"}` priority: 1 - selector: `{team="finance"}` priority: 4 policy7: - selector: `{env=~"prod|dev"}` priority: 3 ``` With that configuration, pushes to tenant `12345` with the labels `{env="prod", team="finance"}` would be assigned to policy6 because the third mapping for policy6 matches these labels and has higher priority than any other matching. --- docs/sources/shared/configuration.md | 14 +++ pkg/compactor/retention/expiration.go | 1 + pkg/compactor/retention/expiration_test.go | 9 +- pkg/distributor/distributor.go | 48 +++++----- pkg/distributor/distributor_test.go | 63 ++++++++++++- pkg/distributor/validation_metrics.go | 40 +++++--- pkg/distributor/validator.go | 44 ++++----- pkg/distributor/validator_test.go | 4 +- pkg/ingester/instance.go | 11 ++- pkg/ingester/stream.go | 9 +- pkg/validation/ingestion_policies.go | 47 +++++++++ pkg/validation/ingestion_policies_test.go | 105 +++++++++++++++++++++ pkg/validation/limits.go | 23 ++++- pkg/validation/limits_test.go | 21 +++-- pkg/validation/validate.go | 4 +- 15 files changed, 359 insertions(+), 84 deletions(-) create mode 100644 pkg/validation/ingestion_policies.go create mode 100644 pkg/validation/ingestion_policies_test.go diff --git a/docs/sources/shared/configuration.md b/docs/sources/shared/configuration.md index b53d5266caba6..8ad1332e213c9 100644 --- a/docs/sources/shared/configuration.md +++ b/docs/sources/shared/configuration.md @@ -3613,6 +3613,20 @@ otlp_config: # CLI flag: -validation.enforced-labels [enforced_labels: | default = []] +# Map of policies to stream selectors with a priority. Experimental. +# Example: +# policy_stream_mapping: +# finance: +# - selectors: ["{namespace="prod", container="billing"}"] +# priority: 2 +# ops: +# - selectors: ["{namespace="prod", container="ops"}"] +# priority: 1 +# staging: +# - selectors: ["{namespace="staging"}, {namespace="dev"}"] +# priority: 1 +[policy_stream_mapping: ] + # The number of partitions a tenant's data should be sharded to when using kafka # ingestion. Tenants are sharded across partitions using shuffle-sharding. 0 # disables shuffle sharding and tenant is sharded across all partitions. 
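To make the selection rule above concrete, here is a minimal usage sketch (not part of the patch) built on the `PolicyStreamMapping` and `PriorityStream` types this PR adds in `pkg/validation/ingestion_policies.go` further below. The matchers are filled in by hand purely for illustration; in the real code path `Limits.Validate()` parses each `Selector` and populates them, and the policy/label names used here are hypothetical.

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"

	"github.com/grafana/loki/v3/pkg/validation"
)

func main() {
	// Hypothetical per-tenant mapping; Matchers are normally derived from
	// Selector during limits validation, but are set manually in this sketch.
	mapping := validation.PolicyStreamMapping{
		"finance": {{
			Selector: `{namespace="prod", container="billing"}`,
			Priority: 2,
			Matchers: []*labels.Matcher{
				labels.MustNewMatcher(labels.MatchEqual, "namespace", "prod"),
				labels.MustNewMatcher(labels.MatchEqual, "container", "billing"),
			},
		}},
		"ops": {{
			Selector: `{namespace="prod"}`,
			Priority: 1,
			Matchers: []*labels.Matcher{
				labels.MustNewMatcher(labels.MatchEqual, "namespace", "prod"),
			},
		}},
	}

	// Both policies match these labels; "finance" wins because its matching
	// entry carries the higher priority.
	lbs := labels.FromStrings("container", "billing", "namespace", "prod")
	fmt.Println(mapping.PolicyFor(lbs)) // -> finance
}
```

This mirrors the tenant example in the commit message: when several policy entries match a stream, the policy whose matching entry has the highest priority is the one reported on the new `policy` metric label.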
diff --git a/pkg/compactor/retention/expiration.go b/pkg/compactor/retention/expiration.go index 3ab412d2306dc..392999a0d6908 100644 --- a/pkg/compactor/retention/expiration.go +++ b/pkg/compactor/retention/expiration.go @@ -42,6 +42,7 @@ type Limits interface { StreamRetention(userID string) []validation.StreamRetention AllByUserID() map[string]*validation.Limits DefaultLimits() *validation.Limits + PoliciesStreamMapping(userID string) validation.PolicyStreamMapping } func NewExpirationChecker(limits Limits) ExpirationChecker { diff --git a/pkg/compactor/retention/expiration_test.go b/pkg/compactor/retention/expiration_test.go index 8824919c4298e..09e04c4517082 100644 --- a/pkg/compactor/retention/expiration_test.go +++ b/pkg/compactor/retention/expiration_test.go @@ -13,8 +13,9 @@ import ( ) type retentionLimit struct { - retentionPeriod time.Duration - streamRetention []validation.StreamRetention + retentionPeriod time.Duration + streamRetention []validation.StreamRetention + policyStreamMapping validation.PolicyStreamMapping } func (r retentionLimit) convertToValidationLimit() *validation.Limits { @@ -33,6 +34,10 @@ func (f fakeLimits) RetentionPeriod(userID string) time.Duration { return f.perTenant[userID].retentionPeriod } +func (f fakeLimits) PoliciesStreamMapping(_ string) validation.PolicyStreamMapping { + return f.perTenant["user0"].policyStreamMapping +} + func (f fakeLimits) StreamRetention(userID string) []validation.StreamRetention { return f.perTenant[userID].streamRetention } diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index d44206a3e4998..8c44a66832c44 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -528,14 +528,14 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log d.truncateLines(validationContext, &stream) var lbs labels.Labels - var retentionHours string - lbs, stream.Labels, stream.Hash, retentionHours, err = d.parseStreamLabels(validationContext, stream.Labels, stream) + var retentionHours, policy string + lbs, stream.Labels, stream.Hash, retentionHours, policy, err = d.parseStreamLabels(validationContext, stream.Labels, stream) if err != nil { d.writeFailuresManager.Log(tenantID, err) validationErrors.Add(err) - validation.DiscardedSamples.WithLabelValues(validation.InvalidLabels, tenantID, retentionHours).Add(float64(len(stream.Entries))) + validation.DiscardedSamples.WithLabelValues(validation.InvalidLabels, tenantID, retentionHours, policy).Add(float64(len(stream.Entries))) discardedBytes := util.EntriesTotalSize(stream.Entries) - validation.DiscardedBytes.WithLabelValues(validation.InvalidLabels, tenantID, retentionHours).Add(float64(discardedBytes)) + validation.DiscardedBytes.WithLabelValues(validation.InvalidLabels, tenantID, retentionHours, policy).Add(float64(discardedBytes)) continue } @@ -543,9 +543,9 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log err := fmt.Errorf(validation.MissingEnforcedLabelsErrorMsg, strings.Join(lbsMissing, ","), tenantID) d.writeFailuresManager.Log(tenantID, err) validationErrors.Add(err) - validation.DiscardedSamples.WithLabelValues(validation.MissingEnforcedLabels, tenantID, retentionHours).Add(float64(len(stream.Entries))) + validation.DiscardedSamples.WithLabelValues(validation.MissingEnforcedLabels, tenantID, retentionHours, policy).Add(float64(len(stream.Entries))) discardedBytes := util.EntriesTotalSize(stream.Entries) - 
validation.DiscardedBytes.WithLabelValues(validation.MissingEnforcedLabels, tenantID, retentionHours).Add(float64(discardedBytes)) + validation.DiscardedBytes.WithLabelValues(validation.MissingEnforcedLabels, tenantID, retentionHours, policy).Add(float64(discardedBytes)) continue } @@ -554,7 +554,7 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log prevTs := stream.Entries[0].Timestamp for _, entry := range stream.Entries { - if err := d.validator.ValidateEntry(ctx, validationContext, lbs, entry, retentionHours); err != nil { + if err := d.validator.ValidateEntry(ctx, validationContext, lbs, entry, retentionHours, policy); err != nil { d.writeFailuresManager.Log(tenantID, err) validationErrors.Add(err) continue @@ -609,7 +609,7 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log } n++ - validationContext.validationMetrics.compute(entry, retentionHours) + validationContext.validationMetrics.compute(entry, retentionHours, policy) pushSize += len(entry.Line) } stream.Entries = stream.Entries[:n] @@ -647,10 +647,10 @@ func (d *Distributor) Push(ctx context.Context, req *logproto.PushRequest) (*log return nil, httpgrpc.Errorf(retStatusCode, "%s", err.Error()) } - if !d.ingestionRateLimiter.AllowN(now, tenantID, validationContext.validationMetrics.lineSize) { + if !d.ingestionRateLimiter.AllowN(now, tenantID, validationContext.validationMetrics.aggregatedPushStats.lineSize) { d.trackDiscardedData(ctx, req, validationContext, tenantID, validationContext.validationMetrics, validation.RateLimited) - err = fmt.Errorf(validation.RateLimitedErrorMsg, tenantID, int(d.ingestionRateLimiter.Limit(now, tenantID)), validationContext.validationMetrics.lineCount, validationContext.validationMetrics.lineSize) + err = fmt.Errorf(validation.RateLimitedErrorMsg, tenantID, int(d.ingestionRateLimiter.Limit(now, tenantID)), validationContext.validationMetrics.aggregatedPushStats.lineCount, validationContext.validationMetrics.aggregatedPushStats.lineSize) d.writeFailuresManager.Log(tenantID, err) // Return a 429 to indicate to the client they are being rate limited return nil, httpgrpc.Errorf(http.StatusTooManyRequests, "%s", err.Error()) @@ -787,14 +787,16 @@ func (d *Distributor) trackDiscardedData( validationMetrics validationMetrics, reason string, ) { - for retentionHours, count := range validationMetrics.lineCountPerRetentionHours { - validation.DiscardedSamples.WithLabelValues(reason, tenantID, retentionHours).Add(float64(count)) - validation.DiscardedBytes.WithLabelValues(reason, tenantID, retentionHours).Add(float64(validationMetrics.lineSizePerRetentionHours[retentionHours])) + for policy, retentionToStats := range validationMetrics.policyPushStats { + for retentionHours, stats := range retentionToStats { + validation.DiscardedSamples.WithLabelValues(reason, tenantID, retentionHours, policy).Add(float64(stats.lineCount)) + validation.DiscardedBytes.WithLabelValues(reason, tenantID, retentionHours, policy).Add(float64(stats.lineSize)) + } } if d.usageTracker != nil { for _, stream := range req.Streams { - lbs, _, _, _, err := d.parseStreamLabels(validationContext, stream.Labels, stream) + lbs, _, _, _, _, err := d.parseStreamLabels(validationContext, stream.Labels, stream) if err != nil { continue } @@ -1173,28 +1175,32 @@ type labelData struct { hash uint64 } -func (d *Distributor) parseStreamLabels(vContext validationContext, key string, stream logproto.Stream) (labels.Labels, string, uint64, string, error) { +func (d *Distributor) 
parseStreamLabels(vContext validationContext, key string, stream logproto.Stream) (labels.Labels, string, uint64, string, string, error) { + mapping := d.validator.Limits.PoliciesStreamMapping(vContext.userID) if val, ok := d.labelCache.Get(key); ok { retentionHours := d.tenantsRetention.RetentionHoursFor(vContext.userID, val.ls) - return val.ls, val.ls.String(), val.hash, retentionHours, nil + policy := mapping.PolicyFor(val.ls) + return val.ls, val.ls.String(), val.hash, retentionHours, policy, nil } ls, err := syntax.ParseLabels(key) if err != nil { - tenantRetentionHours := d.tenantsRetention.RetentionHoursFor(vContext.userID, nil) - return nil, "", 0, tenantRetentionHours, fmt.Errorf(validation.InvalidLabelsErrorMsg, key, err) + retentionHours := d.tenantsRetention.RetentionHoursFor(vContext.userID, nil) + // TODO: check for global policy. + return nil, "", 0, retentionHours, mapping.PolicyFor(nil), fmt.Errorf(validation.InvalidLabelsErrorMsg, key, err) } + policy := mapping.PolicyFor(ls) retentionHours := d.tenantsRetention.RetentionHoursFor(vContext.userID, ls) - if err := d.validator.ValidateLabels(vContext, ls, stream, retentionHours); err != nil { - return nil, "", 0, retentionHours, err + if err := d.validator.ValidateLabels(vContext, ls, stream, retentionHours, policy); err != nil { + return nil, "", 0, retentionHours, policy, err } lsHash := ls.Hash() d.labelCache.Add(key, labelData{ls, lsHash}) - return ls, ls.String(), lsHash, retentionHours, nil + return ls, ls.String(), lsHash, retentionHours, policy, nil } // shardCountFor returns the right number of shards to be used by the given stream. diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index 66aa653a20276..5558dde497c7e 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -1233,7 +1233,7 @@ func Benchmark_SortLabelsOnPush(b *testing.B) { for n := 0; n < b.N; n++ { stream := request.Streams[0] stream.Labels = `{buzz="f", a="b"}` - _, _, _, _, err := d.parseStreamLabels(vCtx, stream.Labels, stream) + _, _, _, _, _, err := d.parseStreamLabels(vCtx, stream.Labels, stream) if err != nil { panic("parseStreamLabels fail,err:" + err.Error()) } @@ -1279,7 +1279,7 @@ func TestParseStreamLabels(t *testing.T) { vCtx := d.validator.getValidationContextForTime(testTime, "123") t.Run(tc.name, func(t *testing.T) { - lbs, lbsString, hash, _, err := d.parseStreamLabels(vCtx, tc.origLabels, logproto.Stream{ + lbs, lbsString, hash, _, _, err := d.parseStreamLabels(vCtx, tc.origLabels, logproto.Stream{ Labels: tc.origLabels, }) if tc.expectedErr != nil { @@ -2063,3 +2063,62 @@ func TestDistributor_StructuredMetadataSanitization(t *testing.T) { assert.Equal(t, tc.numSanitizations, testutil.ToFloat64(distributors[0].tenantPushSanitizedStructuredMetadata.WithLabelValues("test"))) } } + +func BenchmarkDistributor_PushWithPolicies(b *testing.B) { + baselineLimits := &validation.Limits{} + flagext.DefaultValues(baselineLimits) + lbs := `{foo="bar", env="prod", daz="baz", container="loki", pod="loki-0"}` + + b.Run("push without policies", func(b *testing.B) { + limits := baselineLimits + limits.PolicyStreamMapping = make(validation.PolicyStreamMapping) + distributors, _ := prepare(&testing.T{}, 1, 3, limits, nil) + req := makeWriteRequestWithLabels(10, 10, []string{lbs}, false, false, false) + b.ResetTimer() + for i := 0; i < b.N; i++ { + distributors[0].Push(ctx, req) //nolint:errcheck + } + }) + + for numPolicies := 1; numPolicies <= 100; numPolicies *= 10 { + 
b.Run(fmt.Sprintf("push with %d policies", numPolicies), func(b *testing.B) { + limits := baselineLimits + limits.PolicyStreamMapping = make(validation.PolicyStreamMapping) + for i := 1; i <= numPolicies; i++ { + limits.PolicyStreamMapping[fmt.Sprintf("policy%d", i)] = []*validation.PriorityStream{ + { + Selector: `{foo="bar"}`, Priority: i, + }, + } + } + + req := makeWriteRequestWithLabels(10, 10, []string{lbs}, false, false, false) + distributors, _ := prepare(&testing.T{}, 1, 3, limits, nil) + b.ResetTimer() + for i := 0; i < b.N; i++ { + distributors[0].Push(ctx, req) //nolint:errcheck + } + }) + } + + for numMatchers := 1; numMatchers <= 100; numMatchers *= 10 { + b.Run(fmt.Sprintf("push with %d matchers", numMatchers), func(b *testing.B) { + limits := baselineLimits + limits.PolicyStreamMapping = make(validation.PolicyStreamMapping) + for i := 1; i <= numMatchers; i++ { + limits.PolicyStreamMapping["policy0"] = append(limits.PolicyStreamMapping["policy0"], &validation.PriorityStream{ + Selector: `{foo="bar"}`, + Matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "foo", "bar")}, + Priority: i, + }) + } + + req := makeWriteRequestWithLabels(10, 10, []string{lbs}, false, false, false) + distributors, _ := prepare(&testing.T{}, 1, 3, limits, nil) + b.ResetTimer() + for i := 0; i < b.N; i++ { + distributors[0].Push(ctx, req) //nolint:errcheck + } + }) + } +} diff --git a/pkg/distributor/validation_metrics.go b/pkg/distributor/validation_metrics.go index 9f22a65062ba4..a005f92decad3 100644 --- a/pkg/distributor/validation_metrics.go +++ b/pkg/distributor/validation_metrics.go @@ -5,26 +5,40 @@ import ( "github.com/grafana/loki/v3/pkg/util" ) +type pushStats struct { + lineSize int + lineCount int +} + type validationMetrics struct { - lineSizePerRetentionHours map[string]int - lineCountPerRetentionHours map[string]int - lineSize int - lineCount int - tenantRetentionHours string + policyPushStats map[string]map[string]pushStats // policy -> retentionHours -> lineSize + tenantRetentionHours string + aggregatedPushStats pushStats } func newValidationMetrics(tenantRetentionHours string) validationMetrics { return validationMetrics{ - lineSizePerRetentionHours: make(map[string]int), - lineCountPerRetentionHours: make(map[string]int), - tenantRetentionHours: tenantRetentionHours, + policyPushStats: make(map[string]map[string]pushStats), + tenantRetentionHours: tenantRetentionHours, } } -func (v *validationMetrics) compute(entry logproto.Entry, retentionHours string) { +func (v *validationMetrics) compute(entry logproto.Entry, retentionHours string, policy string) { + if _, ok := v.policyPushStats[policy]; !ok { + v.policyPushStats[policy] = make(map[string]pushStats) + } + + if _, ok := v.policyPushStats[policy][retentionHours]; !ok { + v.policyPushStats[policy][retentionHours] = pushStats{} + } + totalEntrySize := util.EntryTotalSize(&entry) - v.lineSizePerRetentionHours[retentionHours] += totalEntrySize - v.lineCountPerRetentionHours[retentionHours]++ - v.lineSize += totalEntrySize - v.lineCount++ + + v.aggregatedPushStats.lineSize += totalEntrySize + v.aggregatedPushStats.lineCount++ + + stats := v.policyPushStats[policy][retentionHours] + stats.lineCount++ + stats.lineSize += totalEntrySize + v.policyPushStats[policy][retentionHours] = stats } diff --git a/pkg/distributor/validator.go b/pkg/distributor/validator.go index 96771b925d059..4f99feffb9ae5 100644 --- a/pkg/distributor/validator.go +++ b/pkg/distributor/validator.go @@ -93,7 +93,7 @@ func (v Validator) 
getValidationContextForTime(now time.Time, userID string) val } // ValidateEntry returns an error if the entry is invalid and report metrics for invalid entries accordingly. -func (v Validator) ValidateEntry(ctx context.Context, vCtx validationContext, labels labels.Labels, entry logproto.Entry, retentionHours string) error { +func (v Validator) ValidateEntry(ctx context.Context, vCtx validationContext, labels labels.Labels, entry logproto.Entry, retentionHours string, policy string) error { ts := entry.Timestamp.UnixNano() validation.LineLengthHist.Observe(float64(len(entry.Line))) structuredMetadataCount := len(entry.StructuredMetadata) @@ -104,8 +104,8 @@ func (v Validator) ValidateEntry(ctx context.Context, vCtx validationContext, la // Makes time string on the error message formatted consistently. formatedEntryTime := entry.Timestamp.Format(timeFormat) formatedRejectMaxAgeTime := time.Unix(0, vCtx.rejectOldSampleMaxAge).Format(timeFormat) - validation.DiscardedSamples.WithLabelValues(validation.GreaterThanMaxSampleAge, vCtx.userID, retentionHours).Inc() - validation.DiscardedBytes.WithLabelValues(validation.GreaterThanMaxSampleAge, vCtx.userID, retentionHours).Add(entrySize) + validation.DiscardedSamples.WithLabelValues(validation.GreaterThanMaxSampleAge, vCtx.userID, retentionHours, policy).Inc() + validation.DiscardedBytes.WithLabelValues(validation.GreaterThanMaxSampleAge, vCtx.userID, retentionHours, policy).Add(entrySize) if v.usageTracker != nil { v.usageTracker.DiscardedBytesAdd(ctx, vCtx.userID, validation.GreaterThanMaxSampleAge, labels, entrySize) } @@ -114,8 +114,8 @@ func (v Validator) ValidateEntry(ctx context.Context, vCtx validationContext, la if ts > vCtx.creationGracePeriod { formatedEntryTime := entry.Timestamp.Format(timeFormat) - validation.DiscardedSamples.WithLabelValues(validation.TooFarInFuture, vCtx.userID, retentionHours).Inc() - validation.DiscardedBytes.WithLabelValues(validation.TooFarInFuture, vCtx.userID, retentionHours).Add(entrySize) + validation.DiscardedSamples.WithLabelValues(validation.TooFarInFuture, vCtx.userID, retentionHours, policy).Inc() + validation.DiscardedBytes.WithLabelValues(validation.TooFarInFuture, vCtx.userID, retentionHours, policy).Add(entrySize) if v.usageTracker != nil { v.usageTracker.DiscardedBytesAdd(ctx, vCtx.userID, validation.TooFarInFuture, labels, entrySize) } @@ -127,8 +127,8 @@ func (v Validator) ValidateEntry(ctx context.Context, vCtx validationContext, la // an orthogonal concept (we need not use ValidateLabels in this context) // but the upstream cortex_validation pkg uses it, so we keep this // for parity. 
- validation.DiscardedSamples.WithLabelValues(validation.LineTooLong, vCtx.userID, retentionHours).Inc() - validation.DiscardedBytes.WithLabelValues(validation.LineTooLong, vCtx.userID, retentionHours).Add(entrySize) + validation.DiscardedSamples.WithLabelValues(validation.LineTooLong, vCtx.userID, retentionHours, policy).Inc() + validation.DiscardedBytes.WithLabelValues(validation.LineTooLong, vCtx.userID, retentionHours, policy).Add(entrySize) if v.usageTracker != nil { v.usageTracker.DiscardedBytesAdd(ctx, vCtx.userID, validation.LineTooLong, labels, entrySize) } @@ -137,8 +137,8 @@ func (v Validator) ValidateEntry(ctx context.Context, vCtx validationContext, la if structuredMetadataCount > 0 { if !vCtx.allowStructuredMetadata { - validation.DiscardedSamples.WithLabelValues(validation.DisallowedStructuredMetadata, vCtx.userID, retentionHours).Inc() - validation.DiscardedBytes.WithLabelValues(validation.DisallowedStructuredMetadata, vCtx.userID, retentionHours).Add(entrySize) + validation.DiscardedSamples.WithLabelValues(validation.DisallowedStructuredMetadata, vCtx.userID, retentionHours, policy).Inc() + validation.DiscardedBytes.WithLabelValues(validation.DisallowedStructuredMetadata, vCtx.userID, retentionHours, policy).Add(entrySize) if v.usageTracker != nil { v.usageTracker.DiscardedBytesAdd(ctx, vCtx.userID, validation.DisallowedStructuredMetadata, labels, entrySize) } @@ -146,8 +146,8 @@ func (v Validator) ValidateEntry(ctx context.Context, vCtx validationContext, la } if maxSize := vCtx.maxStructuredMetadataSize; maxSize != 0 && structuredMetadataSizeBytes > maxSize { - validation.DiscardedSamples.WithLabelValues(validation.StructuredMetadataTooLarge, vCtx.userID, retentionHours).Inc() - validation.DiscardedBytes.WithLabelValues(validation.StructuredMetadataTooLarge, vCtx.userID, retentionHours).Add(entrySize) + validation.DiscardedSamples.WithLabelValues(validation.StructuredMetadataTooLarge, vCtx.userID, retentionHours, policy).Inc() + validation.DiscardedBytes.WithLabelValues(validation.StructuredMetadataTooLarge, vCtx.userID, retentionHours, policy).Add(entrySize) if v.usageTracker != nil { v.usageTracker.DiscardedBytesAdd(ctx, vCtx.userID, validation.StructuredMetadataTooLarge, labels, entrySize) } @@ -155,8 +155,8 @@ func (v Validator) ValidateEntry(ctx context.Context, vCtx validationContext, la } if maxCount := vCtx.maxStructuredMetadataCount; maxCount != 0 && structuredMetadataCount > maxCount { - validation.DiscardedSamples.WithLabelValues(validation.StructuredMetadataTooMany, vCtx.userID, retentionHours).Inc() - validation.DiscardedBytes.WithLabelValues(validation.StructuredMetadataTooMany, vCtx.userID, retentionHours).Add(entrySize) + validation.DiscardedSamples.WithLabelValues(validation.StructuredMetadataTooMany, vCtx.userID, retentionHours, policy).Inc() + validation.DiscardedBytes.WithLabelValues(validation.StructuredMetadataTooMany, vCtx.userID, retentionHours, policy).Add(entrySize) if v.usageTracker != nil { v.usageTracker.DiscardedBytesAdd(ctx, vCtx.userID, validation.StructuredMetadataTooMany, labels, entrySize) } @@ -168,9 +168,9 @@ func (v Validator) ValidateEntry(ctx context.Context, vCtx validationContext, la } // Validate labels returns an error if the labels are invalid -func (v Validator) ValidateLabels(ctx validationContext, ls labels.Labels, stream logproto.Stream, retentionHours string) error { +func (v Validator) ValidateLabels(ctx validationContext, ls labels.Labels, stream logproto.Stream, retentionHours, policy string) error { if len(ls) == 0 { 
- validation.DiscardedSamples.WithLabelValues(validation.MissingLabels, ctx.userID, retentionHours).Inc() + validation.DiscardedSamples.WithLabelValues(validation.MissingLabels, ctx.userID, retentionHours, policy).Inc() return fmt.Errorf(validation.MissingLabelsErrorMsg) } @@ -187,20 +187,20 @@ func (v Validator) ValidateLabels(ctx validationContext, ls labels.Labels, strea } if numLabelNames > ctx.maxLabelNamesPerSeries { - updateMetrics(validation.MaxLabelNamesPerSeries, ctx.userID, stream, retentionHours) + updateMetrics(validation.MaxLabelNamesPerSeries, ctx.userID, stream, retentionHours, policy) return fmt.Errorf(validation.MaxLabelNamesPerSeriesErrorMsg, stream.Labels, numLabelNames, ctx.maxLabelNamesPerSeries) } lastLabelName := "" for _, l := range ls { if len(l.Name) > ctx.maxLabelNameLength { - updateMetrics(validation.LabelNameTooLong, ctx.userID, stream, retentionHours) + updateMetrics(validation.LabelNameTooLong, ctx.userID, stream, retentionHours, policy) return fmt.Errorf(validation.LabelNameTooLongErrorMsg, stream.Labels, l.Name) } else if len(l.Value) > ctx.maxLabelValueLength { - updateMetrics(validation.LabelValueTooLong, ctx.userID, stream, retentionHours) + updateMetrics(validation.LabelValueTooLong, ctx.userID, stream, retentionHours, policy) return fmt.Errorf(validation.LabelValueTooLongErrorMsg, stream.Labels, l.Value) } else if cmp := strings.Compare(lastLabelName, l.Name); cmp == 0 { - updateMetrics(validation.DuplicateLabelNames, ctx.userID, stream, retentionHours) + updateMetrics(validation.DuplicateLabelNames, ctx.userID, stream, retentionHours, policy) return fmt.Errorf(validation.DuplicateLabelNamesErrorMsg, stream.Labels, l.Name) } lastLabelName = l.Name @@ -217,8 +217,8 @@ func (v Validator) ShouldBlockIngestion(ctx validationContext, now time.Time) (b return now.Before(ctx.blockIngestionUntil), ctx.blockIngestionUntil, ctx.blockIngestionStatusCode } -func updateMetrics(reason, userID string, stream logproto.Stream, retentionHours string) { - validation.DiscardedSamples.WithLabelValues(reason, userID, retentionHours).Add(float64(len(stream.Entries))) +func updateMetrics(reason, userID string, stream logproto.Stream, retentionHours, policy string) { + validation.DiscardedSamples.WithLabelValues(reason, userID, retentionHours, policy).Add(float64(len(stream.Entries))) bytes := util.EntriesTotalSize(stream.Entries) - validation.DiscardedBytes.WithLabelValues(reason, userID, retentionHours).Add(float64(bytes)) + validation.DiscardedBytes.WithLabelValues(reason, userID, retentionHours, policy).Add(float64(bytes)) } diff --git a/pkg/distributor/validator_test.go b/pkg/distributor/validator_test.go index 0881bd1a06214..73a9d1aa0cc38 100644 --- a/pkg/distributor/validator_test.go +++ b/pkg/distributor/validator_test.go @@ -133,7 +133,7 @@ func TestValidator_ValidateEntry(t *testing.T) { assert.NoError(t, err) retentionHours := util.RetentionHours(v.RetentionPeriod(tt.userID)) - err = v.ValidateEntry(ctx, v.getValidationContextForTime(testTime, tt.userID), testStreamLabels, tt.entry, retentionHours) + err = v.ValidateEntry(ctx, v.getValidationContextForTime(testTime, tt.userID), testStreamLabels, tt.entry, retentionHours, "") assert.Equal(t, tt.expected, err) }) } @@ -232,7 +232,7 @@ func TestValidator_ValidateLabels(t *testing.T) { v, err := NewValidator(o, nil) assert.NoError(t, err) - err = v.ValidateLabels(v.getValidationContextForTime(testTime, tt.userID), mustParseLabels(tt.labels), logproto.Stream{Labels: tt.labels}, retentionHours) + err = 
v.ValidateLabels(v.getValidationContextForTime(testTime, tt.userID), mustParseLabels(tt.labels), logproto.Stream{Labels: tt.labels}, retentionHours, "") assert.Equal(t, tt.expected, err) }) } diff --git a/pkg/ingester/instance.go b/pkg/ingester/instance.go index 72a02df159103..30257681c9b9a 100644 --- a/pkg/ingester/instance.go +++ b/pkg/ingester/instance.go @@ -297,13 +297,14 @@ func (i *instance) createStream(ctx context.Context, pushReqStream logproto.Stre } retentionHours := util.RetentionHours(i.tenantsRetention.RetentionPeriodFor(i.instanceID, labels)) - + mapping := i.limiter.limits.PoliciesStreamMapping(i.instanceID) + policy := mapping.PolicyFor(labels) if record != nil { err = i.streamCountLimiter.AssertNewStreamAllowed(i.instanceID) } if err != nil { - return i.onStreamCreationError(ctx, pushReqStream, err, labels, retentionHours) + return i.onStreamCreationError(ctx, pushReqStream, err, labels, retentionHours, policy) } fp := i.getHashForLabels(labels) @@ -333,7 +334,7 @@ func (i *instance) createStream(ctx context.Context, pushReqStream logproto.Stre return s, nil } -func (i *instance) onStreamCreationError(ctx context.Context, pushReqStream logproto.Stream, err error, labels labels.Labels, retentionHours string) (*stream, error) { +func (i *instance) onStreamCreationError(ctx context.Context, pushReqStream logproto.Stream, err error, labels labels.Labels, retentionHours, policy string) (*stream, error) { if i.configs.LogStreamCreation(i.instanceID) || i.cfg.KafkaIngestion.Enabled { l := level.Debug(util_log.Logger) @@ -349,9 +350,9 @@ func (i *instance) onStreamCreationError(ctx context.Context, pushReqStream logp ) } - validation.DiscardedSamples.WithLabelValues(validation.StreamLimit, i.instanceID, retentionHours).Add(float64(len(pushReqStream.Entries))) + validation.DiscardedSamples.WithLabelValues(validation.StreamLimit, i.instanceID, retentionHours, policy).Add(float64(len(pushReqStream.Entries))) bytes := util.EntriesTotalSize(pushReqStream.Entries) - validation.DiscardedBytes.WithLabelValues(validation.StreamLimit, i.instanceID, retentionHours).Add(float64(bytes)) + validation.DiscardedBytes.WithLabelValues(validation.StreamLimit, i.instanceID, retentionHours, policy).Add(float64(bytes)) if i.customStreamsTracker != nil { i.customStreamsTracker.DiscardedBytesAdd(ctx, i.instanceID, validation.StreamLimit, labels, float64(bytes)) } diff --git a/pkg/ingester/stream.go b/pkg/ingester/stream.go index 3d2a7bf1d0319..8aa975ab826f8 100644 --- a/pkg/ingester/stream.go +++ b/pkg/ingester/stream.go @@ -84,6 +84,7 @@ type stream struct { configs *runtime.TenantConfigs retentionHours string + policy string } type chunkDesc struct { @@ -481,15 +482,15 @@ func (s *stream) reportMetrics(ctx context.Context, outOfOrderSamples, outOfOrde if s.unorderedWrites { name = validation.TooFarBehind } - validation.DiscardedSamples.WithLabelValues(name, s.tenant, s.retentionHours).Add(float64(outOfOrderSamples)) - validation.DiscardedBytes.WithLabelValues(name, s.tenant, s.retentionHours).Add(float64(outOfOrderBytes)) + validation.DiscardedSamples.WithLabelValues(name, s.tenant, s.retentionHours, s.policy).Add(float64(outOfOrderSamples)) + validation.DiscardedBytes.WithLabelValues(name, s.tenant, s.retentionHours, s.policy).Add(float64(outOfOrderBytes)) if usageTracker != nil { usageTracker.DiscardedBytesAdd(ctx, s.tenant, name, s.labels, float64(outOfOrderBytes)) } } if rateLimitedSamples > 0 { - validation.DiscardedSamples.WithLabelValues(validation.StreamRateLimit, s.tenant, 
s.retentionHours).Add(float64(rateLimitedSamples)) - validation.DiscardedBytes.WithLabelValues(validation.StreamRateLimit, s.tenant, s.retentionHours).Add(float64(rateLimitedBytes)) + validation.DiscardedSamples.WithLabelValues(validation.StreamRateLimit, s.tenant, s.retentionHours, s.policy).Add(float64(rateLimitedSamples)) + validation.DiscardedBytes.WithLabelValues(validation.StreamRateLimit, s.tenant, s.retentionHours, s.policy).Add(float64(rateLimitedBytes)) if usageTracker != nil { usageTracker.DiscardedBytesAdd(ctx, s.tenant, validation.StreamRateLimit, s.labels, float64(rateLimitedBytes)) } diff --git a/pkg/validation/ingestion_policies.go b/pkg/validation/ingestion_policies.go new file mode 100644 index 0000000000000..6183125b0bf81 --- /dev/null +++ b/pkg/validation/ingestion_policies.go @@ -0,0 +1,47 @@ +package validation + +import "github.com/prometheus/prometheus/model/labels" + +type PriorityStream struct { + Priority int `yaml:"priority" json:"priority" doc:"description=The larger the value, the higher the priority."` + Selector string `yaml:"selector" json:"selector" doc:"description=Stream selector expression."` + Matchers []*labels.Matcher `yaml:"-" json:"-"` // populated during validation. +} + +func (p *PriorityStream) Matches(lbs labels.Labels) bool { + for _, m := range p.Matchers { + if !m.Matches(lbs.Get(m.Name)) { + return false + } + } + return true +} + +type PolicyStreamMapping map[string][]*PriorityStream + +func (p *PolicyStreamMapping) PolicyFor(lbs labels.Labels) string { + var ( + matchedPolicy *PriorityStream + found bool + matchedPolicyName string + ) + + for policyName, policyStreams := range *p { + for _, policyStream := range policyStreams { + if found && policyStream.Priority <= matchedPolicy.Priority { + // Even if a match occurs it won't have a higher priority than the current matched policy. 
+ continue + } + + if !policyStream.Matches(lbs) { + continue + } + + found = true + matchedPolicy = policyStream + matchedPolicyName = policyName + } + } + + return matchedPolicyName +} diff --git a/pkg/validation/ingestion_policies_test.go b/pkg/validation/ingestion_policies_test.go new file mode 100644 index 0000000000000..802a7a03ebfb4 --- /dev/null +++ b/pkg/validation/ingestion_policies_test.go @@ -0,0 +1,105 @@ +package validation + +import ( + "testing" + + "github.com/prometheus/prometheus/model/labels" + "github.com/stretchr/testify/require" +) + +func Test_PolicyStreamMapping_PolicyFor(t *testing.T) { + mapping := PolicyStreamMapping{ + "policy1": []*PriorityStream{ + { + Selector: `{foo="bar"}`, + Priority: 2, + Matchers: []*labels.Matcher{ + labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"), + }, + }, + }, + "policy2": []*PriorityStream{ + { + Selector: `{foo="bar", daz="baz"}`, + Priority: 1, + Matchers: []*labels.Matcher{ + labels.MustNewMatcher(labels.MatchEqual, "foo", "bar"), + labels.MustNewMatcher(labels.MatchEqual, "daz", "baz"), + }, + }, + }, + "policy3": []*PriorityStream{ + { + Selector: `{qyx="qzx", qox="qox"}`, + Priority: 1, + Matchers: []*labels.Matcher{ + labels.MustNewMatcher(labels.MatchEqual, "qyx", "qzx"), + labels.MustNewMatcher(labels.MatchEqual, "qox", "qox"), + }, + }, + }, + "policy4": []*PriorityStream{ + { + Selector: `{qyx="qzx", qox="qox"}`, + Priority: 1, + Matchers: []*labels.Matcher{ + labels.MustNewMatcher(labels.MatchEqual, "qyx", "qzx"), + labels.MustNewMatcher(labels.MatchEqual, "qox", "qox"), + }, + }, + }, + "policy5": []*PriorityStream{ + { + Selector: `{qab=~"qzx.*"}`, + Priority: 1, + Matchers: []*labels.Matcher{ + labels.MustNewMatcher(labels.MatchRegexp, "qab", "qzx.*"), + }, + }, + }, + "policy6": []*PriorityStream{ + { + Selector: `{env="prod"}`, + Priority: 2, + Matchers: []*labels.Matcher{ + labels.MustNewMatcher(labels.MatchEqual, "env", "prod"), + }, + }, + { + Selector: `{env=~"prod|staging"}`, + Priority: 1, + Matchers: []*labels.Matcher{ + labels.MustNewMatcher(labels.MatchRegexp, "env", "prod|staging"), + }, + }, + { + Selector: `{team="finance"}`, + Priority: 4, + Matchers: []*labels.Matcher{ + labels.MustNewMatcher(labels.MatchEqual, "team", "finance"), + }, + }, + }, + "policy7": []*PriorityStream{ + { + Selector: `{env=~"prod|dev"}`, + Priority: 3, + Matchers: []*labels.Matcher{ + labels.MustNewMatcher(labels.MatchRegexp, "env", "prod|dev"), + }, + }, + }, + } + + require.Equal(t, "policy1", mapping.PolicyFor(labels.FromStrings("foo", "bar"))) + // matches both policy2 and policy1 but policy1 has higher priority. + require.Equal(t, "policy1", mapping.PolicyFor(labels.FromStrings("foo", "bar", "daz", "baz"))) + // matches policy3 and policy4 but policy3 appears first. + require.Equal(t, "policy3", mapping.PolicyFor(labels.FromStrings("qyx", "qzx", "qox", "qox"))) + // matches no policy. + require.Equal(t, "", mapping.PolicyFor(labels.FromStrings("foo", "fooz", "daz", "qux", "quux", "corge"))) + // matches policy5 through regex. 
+ require.Equal(t, "policy5", mapping.PolicyFor(labels.FromStrings("qab", "qzxqox"))) + + require.Equal(t, "policy6", mapping.PolicyFor(labels.FromStrings("env", "prod", "team", "finance"))) +} diff --git a/pkg/validation/limits.go b/pkg/validation/limits.go index ee72b049457dd..be3528c5fd160 100644 --- a/pkg/validation/limits.go +++ b/pkg/validation/limits.go @@ -227,9 +227,10 @@ type Limits struct { OTLPConfig push.OTLPConfig `yaml:"otlp_config" json:"otlp_config" doc:"description=OTLP log ingestion configurations"` GlobalOTLPConfig push.GlobalOTLPConfig `yaml:"-" json:"-"` - BlockIngestionUntil dskit_flagext.Time `yaml:"block_ingestion_until" json:"block_ingestion_until"` - BlockIngestionStatusCode int `yaml:"block_ingestion_status_code" json:"block_ingestion_status_code"` - EnforcedLabels []string `yaml:"enforced_labels" json:"enforced_labels" category:"experimental"` + BlockIngestionUntil dskit_flagext.Time `yaml:"block_ingestion_until" json:"block_ingestion_until"` + BlockIngestionStatusCode int `yaml:"block_ingestion_status_code" json:"block_ingestion_status_code"` + EnforcedLabels []string `yaml:"enforced_labels" json:"enforced_labels" category:"experimental"` + PolicyStreamMapping PolicyStreamMapping `yaml:"policy_stream_mapping" json:"policy_stream_mapping" category:"experimental" doc:"description=Map of policies to stream selectors with a priority. Experimental.\nExample:\npolicy_stream_mapping:\n finance:\n - selectors: [\"{namespace=\"prod\", container=\"billing\"}\"]\n priority: 2\n ops:\n - selectors: [\"{namespace=\"prod\", container=\"ops\"}\"]\n priority: 1\n staging:\n - selectors: [\"{namespace=\"staging\"}, {namespace=\"dev\"}\"]\n priority: 1"` IngestionPartitionsTenantShardSize int `yaml:"ingestion_partitions_tenant_shard_size" json:"ingestion_partitions_tenant_shard_size" category:"experimental"` @@ -511,6 +512,18 @@ func (l *Limits) Validate() error { } } + if l.PolicyStreamMapping != nil { + for policyName, policyStreams := range l.PolicyStreamMapping { + for idx, policyStream := range policyStreams { + matchers, err := syntax.ParseMatchers(policyStream.Selector, true) + if err != nil { + return fmt.Errorf("invalid labels matchers for policy stream mapping: %w", err) + } + l.PolicyStreamMapping[policyName][idx].Matchers = matchers + } + } + } + if _, err := deletionmode.ParseMode(l.DeletionMode); err != nil { return err } @@ -1111,6 +1124,10 @@ func (o *Overrides) EnforcedLabels(userID string) []string { return o.getOverridesForUser(userID).EnforcedLabels } +func (o *Overrides) PoliciesStreamMapping(userID string) PolicyStreamMapping { + return o.getOverridesForUser(userID).PolicyStreamMapping +} + func (o *Overrides) ShardAggregations(userID string) []string { return o.getOverridesForUser(userID).ShardAggregations } diff --git a/pkg/validation/limits_test.go b/pkg/validation/limits_test.go index 845ec805c5b37..6c91f87d84dd8 100644 --- a/pkg/validation/limits_test.go +++ b/pkg/validation/limits_test.go @@ -226,8 +226,9 @@ ruler_remote_write_headers: Selector: `{a="b"}`, }, }, - OTLPConfig: defaultOTLPConfig, - EnforcedLabels: []string{}, + OTLPConfig: defaultOTLPConfig, + EnforcedLabels: []string{}, + PolicyStreamMapping: PolicyStreamMapping{}, }, }, { @@ -246,8 +247,9 @@ ruler_remote_write_headers: Selector: `{a="b"}`, }, }, - OTLPConfig: defaultOTLPConfig, - EnforcedLabels: []string{}, + OTLPConfig: defaultOTLPConfig, + EnforcedLabels: []string{}, + PolicyStreamMapping: PolicyStreamMapping{}, }, }, { @@ -272,6 +274,7 @@ retention_stream: RulerRemoteWriteHeaders: 
OverwriteMarshalingStringMap{map[string]string{"a": "b"}}, OTLPConfig: defaultOTLPConfig, EnforcedLabels: []string{}, + PolicyStreamMapping: PolicyStreamMapping{}, }, }, { @@ -293,8 +296,9 @@ reject_old_samples: true Selector: `{a="b"}`, }, }, - OTLPConfig: defaultOTLPConfig, - EnforcedLabels: []string{}, + OTLPConfig: defaultOTLPConfig, + EnforcedLabels: []string{}, + PolicyStreamMapping: PolicyStreamMapping{}, }, }, { @@ -317,8 +321,9 @@ query_timeout: 5m Selector: `{a="b"}`, }, }, - OTLPConfig: defaultOTLPConfig, - EnforcedLabels: []string{}, + OTLPConfig: defaultOTLPConfig, + EnforcedLabels: []string{}, + PolicyStreamMapping: PolicyStreamMapping{}, }, }, } { diff --git a/pkg/validation/validate.go b/pkg/validation/validate.go index 31e1729e264cc..e6af919adceff 100644 --- a/pkg/validation/validate.go +++ b/pkg/validation/validate.go @@ -115,7 +115,7 @@ var DiscardedBytes = promauto.NewCounterVec( Name: "discarded_bytes_total", Help: "The total number of bytes that were discarded.", }, - []string{ReasonLabel, "tenant", "retention_hours"}, + []string{ReasonLabel, "tenant", "retention_hours", "policy"}, ) // DiscardedSamples is a metric of the number of discarded samples, by reason. @@ -125,7 +125,7 @@ var DiscardedSamples = promauto.NewCounterVec( Name: "discarded_samples_total", Help: "The total number of samples that were discarded.", }, - []string{ReasonLabel, "tenant", "retention_hours"}, + []string{ReasonLabel, "tenant", "retention_hours", "policy"}, ) var LineLengthHist = promauto.NewHistogram(prometheus.HistogramOpts{ From 0c38b947648510894d0d8f50d5cccc9a57576298 Mon Sep 17 00:00:00 2001 From: Adin Hodovic Date: Wed, 5 Feb 2025 12:28:59 +0100 Subject: [PATCH 30/33] fix(helm): Disable service monitor for nginx service (#12746) Co-authored-by: Vladyslav Diachenko <82767850+vlad-diachenko@users.noreply.github.com> --- production/helm/loki/CHANGELOG.md | 4 ++++ production/helm/loki/Chart.yaml | 2 +- production/helm/loki/README.md | 4 ++-- production/helm/loki/templates/gateway/service-gateway.yaml | 3 +++ 4 files changed, 10 insertions(+), 3 deletions(-) diff --git a/production/helm/loki/CHANGELOG.md b/production/helm/loki/CHANGELOG.md index fd833cdcea660..6e39c46487695 100644 --- a/production/helm/loki/CHANGELOG.md +++ b/production/helm/loki/CHANGELOG.md @@ -13,6 +13,10 @@ Entries should include a reference to the pull request that introduced the chang [//]: # ( : do not remove this line. This locator is used by the CI pipeline to automatically create a changelog entry for each new Loki release. Add other chart versions and respective changelog entries bellow this line.) +## 6.25.1 + +- [BUGFIX] Disable service monitor for nginx service. + ## 6.25.0 - [BUGFIX] Removed minio-mc init container from admin-api. diff --git a/production/helm/loki/Chart.yaml b/production/helm/loki/Chart.yaml index 87a6dbf856cc3..c78a2b9901e80 100644 --- a/production/helm/loki/Chart.yaml +++ b/production/helm/loki/Chart.yaml @@ -3,7 +3,7 @@ name: loki description: Helm chart for Grafana Loki and Grafana Enterprise Logs supporting both simple, scalable and distributed modes. 
type: application appVersion: 3.3.2 -version: 6.25.0 +version: 6.25.1 home: https://grafana.github.io/helm-charts sources: - https://github.com/grafana/loki diff --git a/production/helm/loki/README.md b/production/helm/loki/README.md index 50524cea02a9f..7e2aa73e9803e 100644 --- a/production/helm/loki/README.md +++ b/production/helm/loki/README.md @@ -1,6 +1,6 @@ # loki -![Version: 6.25.0](https://img.shields.io/badge/Version-6.25.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 3.3.2](https://img.shields.io/badge/AppVersion-3.3.2-informational?style=flat-square) +![Version: 6.25.1](https://img.shields.io/badge/Version-6.25.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 3.3.2](https://img.shields.io/badge/AppVersion-3.3.2-informational?style=flat-square) Helm chart for Grafana Loki and Grafana Enterprise Logs supporting both simple, scalable and distributed modes. @@ -15,7 +15,7 @@ Helm chart for Grafana Loki and Grafana Enterprise Logs supporting both simple, | Repository | Name | Version | |------------|------|---------| | https://charts.min.io/ | minio(minio) | 5.4.0 | -| https://grafana.github.io/helm-charts | grafana-agent-operator(grafana-agent-operator) | 0.5.0 | +| https://grafana.github.io/helm-charts | grafana-agent-operator(grafana-agent-operator) | 0.5.1 | | https://grafana.github.io/helm-charts | rollout_operator(rollout-operator) | 0.23.0 | Find more information in the Loki Helm Chart [documentation](https://grafana.com/docs/loki/next/installation/helm). diff --git a/production/helm/loki/templates/gateway/service-gateway.yaml b/production/helm/loki/templates/gateway/service-gateway.yaml index 8c710263d7d2b..af44d0ee693a5 100644 --- a/production/helm/loki/templates/gateway/service-gateway.yaml +++ b/production/helm/loki/templates/gateway/service-gateway.yaml @@ -12,6 +12,9 @@ metadata: {{- with .Values.gateway.service.labels }} {{- toYaml . | nindent 4}} {{- end }} + {{- if not (and .Values.enterprise.enabled .Values.enterprise.gelGateway) }} + prometheus.io/service-monitor: "false" + {{- end }} annotations: {{- with .Values.loki.serviceAnnotations }} {{- toYaml . 
| nindent 4}} From fe315eff2c4fa5704fea6254010ec5a89fdaf9fe Mon Sep 17 00:00:00 2001 From: Ashwanth Date: Wed, 5 Feb 2025 17:32:21 +0530 Subject: [PATCH 31/33] chore(ksonnet): configure s3 object store region (#16103) --- production/ksonnet/loki/config.libsonnet | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/production/ksonnet/loki/config.libsonnet b/production/ksonnet/loki/config.libsonnet index 7c7ac4345ced7..c1920e715bc1f 100644 --- a/production/ksonnet/loki/config.libsonnet +++ b/production/ksonnet/loki/config.libsonnet @@ -122,6 +122,11 @@ } else { s3: 's3://' + $._config.s3_address + '/' + $._config.s3_bucket_name, } + ) + ( + if $._config.s3_bucket_region != '' then { + region: $._config.s3_bucket_region, + } + else {} ), } else if $._config.storage_backend == 'azure' then { azure: { From c2e1e88e850cffcb9c34cd7c6296326a5cc8a15a Mon Sep 17 00:00:00 2001 From: Salva Corts Date: Wed, 5 Feb 2025 14:06:06 +0100 Subject: [PATCH 32/33] feat(policies): Add PoliciesStreamMapping to loghttp limits interface (#16105) --- clients/pkg/promtail/targets/lokipush/pushtarget.go | 2 +- pkg/distributor/distributor.go | 7 +++++++ pkg/distributor/http.go | 2 +- pkg/distributor/http_test.go | 1 + pkg/loghttp/push/otlp.go | 2 +- pkg/loghttp/push/push.go | 9 +++++---- pkg/loghttp/push/push_test.go | 9 +++++---- 7 files changed, 21 insertions(+), 11 deletions(-) diff --git a/clients/pkg/promtail/targets/lokipush/pushtarget.go b/clients/pkg/promtail/targets/lokipush/pushtarget.go index f6e33eb8f72d9..e1ebafc1bab2e 100644 --- a/clients/pkg/promtail/targets/lokipush/pushtarget.go +++ b/clients/pkg/promtail/targets/lokipush/pushtarget.go @@ -111,7 +111,7 @@ func (t *PushTarget) run() error { func (t *PushTarget) handleLoki(w http.ResponseWriter, r *http.Request) { logger := util_log.WithContext(r.Context(), util_log.Logger) userID, _ := tenant.TenantID(r.Context()) - req, err := push.ParseRequest(logger, userID, r, nil, push.EmptyLimits{}, push.ParseLokiRequest, nil, false) + req, err := push.ParseRequest(logger, userID, r, nil, push.EmptyLimits{}, push.ParseLokiRequest, nil, nil, false) if err != nil { level.Warn(t.logger).Log("msg", "failed to parse incoming push request", "err", err.Error()) http.Error(w, err.Error(), http.StatusBadRequest) diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index 8c44a66832c44..40bc4f3b5b036 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -180,6 +180,7 @@ type Distributor struct { streamShardCount prometheus.Counter tenantPushSanitizedStructuredMetadata *prometheus.CounterVec + policyResolver push.PolicyResolver usageTracker push.UsageTracker ingesterTasks chan pushIngesterTask ingesterTaskWg sync.WaitGroup @@ -223,6 +224,11 @@ func New( return client.New(internalCfg, addr) } + policyResolver := push.PolicyResolver(func(userID string, lbs labels.Labels) string { + mappings := overrides.PoliciesStreamMapping(userID) + return mappings.PolicyFor(lbs) + }) + validator, err := NewValidator(overrides, usageTracker) if err != nil { return nil, err @@ -280,6 +286,7 @@ func New( healthyInstancesCount: atomic.NewUint32(0), rateLimitStrat: rateLimitStrat, tee: tee, + policyResolver: policyResolver, usageTracker: usageTracker, ingesterTasks: make(chan pushIngesterTask), ingesterAppends: promauto.With(registerer).NewCounterVec(prometheus.CounterOpts{ diff --git a/pkg/distributor/http.go b/pkg/distributor/http.go index 1b0cee2a9c62a..c6c87dbc74454 100644 --- a/pkg/distributor/http.go +++ b/pkg/distributor/http.go @@ 
-41,7 +41,7 @@ func (d *Distributor) pushHandler(w http.ResponseWriter, r *http.Request, pushRe } logPushRequestStreams := d.tenantConfigs.LogPushRequestStreams(tenantID) - req, err := push.ParseRequest(logger, tenantID, r, d.tenantsRetention, d.validator.Limits, pushRequestParser, d.usageTracker, logPushRequestStreams) + req, err := push.ParseRequest(logger, tenantID, r, d.tenantsRetention, d.validator.Limits, pushRequestParser, d.usageTracker, d.policyResolver, logPushRequestStreams) if err != nil { if !errors.Is(err, push.ErrAllLogsFiltered) { if d.tenantConfigs.LogPushRequest(tenantID) { diff --git a/pkg/distributor/http_test.go b/pkg/distributor/http_test.go index 7e1ee788994c4..a73a73fa5e2ab 100644 --- a/pkg/distributor/http_test.go +++ b/pkg/distributor/http_test.go @@ -128,6 +128,7 @@ func (p *fakeParser) parseRequest( _ push.TenantsRetention, _ push.Limits, _ push.UsageTracker, + _ push.PolicyResolver, _ bool, _ log.Logger, ) (*logproto.PushRequest, *push.Stats, error) { diff --git a/pkg/loghttp/push/otlp.go b/pkg/loghttp/push/otlp.go index dbb4ec8349e63..55e5b59174868 100644 --- a/pkg/loghttp/push/otlp.go +++ b/pkg/loghttp/push/otlp.go @@ -43,7 +43,7 @@ func newPushStats() *Stats { } } -func ParseOTLPRequest(userID string, r *http.Request, tenantsRetention TenantsRetention, limits Limits, tracker UsageTracker, logPushRequestStreams bool, logger log.Logger) (*logproto.PushRequest, *Stats, error) { +func ParseOTLPRequest(userID string, r *http.Request, tenantsRetention TenantsRetention, limits Limits, tracker UsageTracker, _ PolicyResolver, logPushRequestStreams bool, logger log.Logger) (*logproto.PushRequest, *Stats, error) { stats := newPushStats() otlpLogs, err := extractLogs(r, stats) if err != nil { diff --git a/pkg/loghttp/push/push.go b/pkg/loghttp/push/push.go index 37938fe2a8e89..bc0d7aa8f4112 100644 --- a/pkg/loghttp/push/push.go +++ b/pkg/loghttp/push/push.go @@ -90,9 +90,10 @@ func (EmptyLimits) DiscoverServiceName(string) []string { } type ( - RequestParser func(userID string, r *http.Request, tenantsRetention TenantsRetention, limits Limits, tracker UsageTracker, logPushRequestStreams bool, logger log.Logger) (*logproto.PushRequest, *Stats, error) + RequestParser func(userID string, r *http.Request, tenantsRetention TenantsRetention, limits Limits, tracker UsageTracker, policyResolver PolicyResolver, logPushRequestStreams bool, logger log.Logger) (*logproto.PushRequest, *Stats, error) RequestParserWrapper func(inner RequestParser) RequestParser ErrorWriter func(w http.ResponseWriter, error string, code int, logger log.Logger) + PolicyResolver func(userID string, lbs labels.Labels) string ) type Stats struct { @@ -113,8 +114,8 @@ type Stats struct { IsAggregatedMetric bool } -func ParseRequest(logger log.Logger, userID string, r *http.Request, tenantsRetention TenantsRetention, limits Limits, pushRequestParser RequestParser, tracker UsageTracker, logPushRequestStreams bool) (*logproto.PushRequest, error) { - req, pushStats, err := pushRequestParser(userID, r, tenantsRetention, limits, tracker, logPushRequestStreams, logger) +func ParseRequest(logger log.Logger, userID string, r *http.Request, tenantsRetention TenantsRetention, limits Limits, pushRequestParser RequestParser, tracker UsageTracker, policyResolver PolicyResolver, logPushRequestStreams bool) (*logproto.PushRequest, error) { + req, pushStats, err := pushRequestParser(userID, r, tenantsRetention, limits, tracker, policyResolver, logPushRequestStreams, logger) if err != nil && !errors.Is(err, 
ErrAllLogsFiltered) { return nil, err } @@ -171,7 +172,7 @@ func ParseRequest(logger log.Logger, userID string, r *http.Request, tenantsRete return req, err } -func ParseLokiRequest(userID string, r *http.Request, tenantsRetention TenantsRetention, limits Limits, tracker UsageTracker, logPushRequestStreams bool, logger log.Logger) (*logproto.PushRequest, *Stats, error) { +func ParseLokiRequest(userID string, r *http.Request, tenantsRetention TenantsRetention, limits Limits, tracker UsageTracker, _ PolicyResolver, logPushRequestStreams bool, logger log.Logger) (*logproto.PushRequest, *Stats, error) { // Body var body io.Reader // bodySize should always reflect the compressed size of the request body diff --git a/pkg/loghttp/push/push_test.go b/pkg/loghttp/push/push_test.go index 54618eb3480cc..2c6c1cefe31b6 100644 --- a/pkg/loghttp/push/push_test.go +++ b/pkg/loghttp/push/push_test.go @@ -270,6 +270,7 @@ func TestParseRequest(t *testing.T) { &fakeLimits{enabled: test.enableServiceDiscovery}, ParseLokiRequest, tracker, + nil, false, ) @@ -364,7 +365,7 @@ func Test_ServiceDetection(t *testing.T) { request := createRequest("/loki/api/v1/push", strings.NewReader(body)) limits := &fakeLimits{enabled: true, labels: []string{"foo"}} - data, err := ParseRequest(util_log.Logger, "fake", request, nil, limits, ParseLokiRequest, tracker, false) + data, err := ParseRequest(util_log.Logger, "fake", request, nil, limits, ParseLokiRequest, tracker, nil, false) require.NoError(t, err) require.Equal(t, labels.FromStrings("foo", "bar", LabelServiceName, "bar").String(), data.Streams[0].Labels) @@ -375,7 +376,7 @@ func Test_ServiceDetection(t *testing.T) { request := createRequest("/otlp/v1/push", bytes.NewReader(body)) limits := &fakeLimits{enabled: true} - data, err := ParseRequest(util_log.Logger, "fake", request, limits, limits, ParseOTLPRequest, tracker, false) + data, err := ParseRequest(util_log.Logger, "fake", request, limits, limits, ParseOTLPRequest, tracker, nil, false) require.NoError(t, err) require.Equal(t, labels.FromStrings("k8s_job_name", "bar", LabelServiceName, "bar").String(), data.Streams[0].Labels) }) @@ -389,7 +390,7 @@ func Test_ServiceDetection(t *testing.T) { labels: []string{"special"}, indexAttributes: []string{"special"}, } - data, err := ParseRequest(util_log.Logger, "fake", request, limits, limits, ParseOTLPRequest, tracker, false) + data, err := ParseRequest(util_log.Logger, "fake", request, limits, limits, ParseOTLPRequest, tracker, nil, false) require.NoError(t, err) require.Equal(t, labels.FromStrings("special", "sauce", LabelServiceName, "sauce").String(), data.Streams[0].Labels) }) @@ -403,7 +404,7 @@ func Test_ServiceDetection(t *testing.T) { labels: []string{"special"}, indexAttributes: []string{}, } - data, err := ParseRequest(util_log.Logger, "fake", request, limits, limits, ParseOTLPRequest, tracker, false) + data, err := ParseRequest(util_log.Logger, "fake", request, limits, limits, ParseOTLPRequest, tracker, nil, false) require.NoError(t, err) require.Equal(t, labels.FromStrings(LabelServiceName, ServiceUnknown).String(), data.Streams[0].Labels) }) From 78becba7f79eb8dadc1b3cc11df58c8bf39e7b5a Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 5 Feb 2025 13:21:21 +0000 Subject: [PATCH 33/33] fix(deps): update module go.etcd.io/bbolt to v1.4.0 (main) (#16107) Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> --- go.mod | 4 +- go.sum | 8 +- vendor/github.com/spf13/pflag/.editorconfig 
| 12 + vendor/github.com/spf13/pflag/.golangci.yaml | 4 + vendor/github.com/spf13/pflag/flag.go | 29 +- vendor/github.com/spf13/pflag/ip.go | 3 + vendor/github.com/spf13/pflag/ipnet_slice.go | 147 ++++++ vendor/github.com/spf13/pflag/string_array.go | 4 - vendor/go.etcd.io/bbolt/.gitignore | 2 + vendor/go.etcd.io/bbolt/.go-version | 2 +- vendor/go.etcd.io/bbolt/Makefile | 62 ++- vendor/go.etcd.io/bbolt/OWNERS | 10 + vendor/go.etcd.io/bbolt/README.md | 68 ++- .../bbolt/{bolt_unix_aix.go => bolt_aix.go} | 1 - vendor/go.etcd.io/bbolt/bolt_android.go | 90 ++++ vendor/go.etcd.io/bbolt/bolt_arm64.go | 1 - vendor/go.etcd.io/bbolt/bolt_loong64.go | 1 - vendor/go.etcd.io/bbolt/bolt_mips64x.go | 1 - vendor/go.etcd.io/bbolt/bolt_mipsx.go | 1 - vendor/go.etcd.io/bbolt/bolt_ppc.go | 1 - vendor/go.etcd.io/bbolt/bolt_ppc64.go | 1 - vendor/go.etcd.io/bbolt/bolt_ppc64le.go | 1 - vendor/go.etcd.io/bbolt/bolt_riscv64.go | 1 - vendor/go.etcd.io/bbolt/bolt_s390x.go | 1 - .../{bolt_unix_solaris.go => bolt_solaris.go} | 0 vendor/go.etcd.io/bbolt/bolt_unix.go | 7 +- vendor/go.etcd.io/bbolt/bolt_windows.go | 8 +- vendor/go.etcd.io/bbolt/boltsync_unix.go | 1 - vendor/go.etcd.io/bbolt/bucket.go | 484 +++++++++++++----- vendor/go.etcd.io/bbolt/cursor.go | 99 ++-- vendor/go.etcd.io/bbolt/db.go | 451 ++++++++-------- vendor/go.etcd.io/bbolt/errors.go | 76 ++- vendor/go.etcd.io/bbolt/errors/errors.go | 84 +++ vendor/go.etcd.io/bbolt/freelist.go | 410 --------------- vendor/go.etcd.io/bbolt/freelist_hmap.go | 178 ------- .../bbolt/internal/common/bucket.go | 54 ++ .../go.etcd.io/bbolt/internal/common/inode.go | 115 +++++ .../go.etcd.io/bbolt/internal/common/meta.go | 161 ++++++ .../go.etcd.io/bbolt/internal/common/page.go | 391 ++++++++++++++ .../go.etcd.io/bbolt/internal/common/types.go | 40 ++ .../bbolt/{ => internal/common}/unsafe.go | 10 +- .../go.etcd.io/bbolt/internal/common/utils.go | 64 +++ .../bbolt/internal/common/verify.go | 67 +++ .../bbolt/internal/freelist/array.go | 108 ++++ .../bbolt/internal/freelist/freelist.go | 82 +++ .../bbolt/internal/freelist/hashmap.go | 292 +++++++++++ .../bbolt/internal/freelist/shared.go | 310 +++++++++++ vendor/go.etcd.io/bbolt/logger.go | 113 ++++ vendor/go.etcd.io/bbolt/mlock_unix.go | 1 - vendor/go.etcd.io/bbolt/node.go | 252 ++++----- vendor/go.etcd.io/bbolt/page.go | 212 -------- vendor/go.etcd.io/bbolt/tx.go | 234 +++++---- vendor/go.etcd.io/bbolt/tx_check.go | 206 +++++--- vendor/modules.txt | 9 +- 54 files changed, 3344 insertions(+), 1630 deletions(-) create mode 100644 vendor/github.com/spf13/pflag/.editorconfig create mode 100644 vendor/github.com/spf13/pflag/.golangci.yaml create mode 100644 vendor/github.com/spf13/pflag/ipnet_slice.go create mode 100644 vendor/go.etcd.io/bbolt/OWNERS rename vendor/go.etcd.io/bbolt/{bolt_unix_aix.go => bolt_aix.go} (99%) create mode 100644 vendor/go.etcd.io/bbolt/bolt_android.go rename vendor/go.etcd.io/bbolt/{bolt_unix_solaris.go => bolt_solaris.go} (100%) create mode 100644 vendor/go.etcd.io/bbolt/errors/errors.go delete mode 100644 vendor/go.etcd.io/bbolt/freelist.go delete mode 100644 vendor/go.etcd.io/bbolt/freelist_hmap.go create mode 100644 vendor/go.etcd.io/bbolt/internal/common/bucket.go create mode 100644 vendor/go.etcd.io/bbolt/internal/common/inode.go create mode 100644 vendor/go.etcd.io/bbolt/internal/common/meta.go create mode 100644 vendor/go.etcd.io/bbolt/internal/common/page.go create mode 100644 vendor/go.etcd.io/bbolt/internal/common/types.go rename vendor/go.etcd.io/bbolt/{ => internal/common}/unsafe.go (74%) 
create mode 100644 vendor/go.etcd.io/bbolt/internal/common/utils.go create mode 100644 vendor/go.etcd.io/bbolt/internal/common/verify.go create mode 100644 vendor/go.etcd.io/bbolt/internal/freelist/array.go create mode 100644 vendor/go.etcd.io/bbolt/internal/freelist/freelist.go create mode 100644 vendor/go.etcd.io/bbolt/internal/freelist/hashmap.go create mode 100644 vendor/go.etcd.io/bbolt/internal/freelist/shared.go create mode 100644 vendor/go.etcd.io/bbolt/logger.go delete mode 100644 vendor/go.etcd.io/bbolt/page.go diff --git a/go.mod b/go.mod index 704546489b33c..c076139647c5b 100644 --- a/go.mod +++ b/go.mod @@ -95,7 +95,7 @@ require ( github.com/stretchr/testify v1.10.0 github.com/uber/jaeger-client-go v2.30.0+incompatible github.com/xdg-go/scram v1.1.2 - go.etcd.io/bbolt v1.3.11 + go.etcd.io/bbolt v1.4.0 go.uber.org/atomic v1.11.0 go.uber.org/goleak v1.3.0 golang.org/x/crypto v0.32.0 @@ -347,7 +347,7 @@ require ( github.com/sirupsen/logrus v1.9.3 github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/spf13/cast v1.7.0 // indirect - github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/pflag v1.0.6 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/uber/jaeger-lib v2.4.1+incompatible // indirect github.com/ugorji/go/codec v1.1.7 // indirect diff --git a/go.sum b/go.sum index 2dc4062022a6a..4233a4c3ff30d 100644 --- a/go.sum +++ b/go.sum @@ -1097,8 +1097,8 @@ github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= @@ -1185,8 +1185,8 @@ github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQ go.einride.tech/aip v0.68.1 h1:16/AfSxcQISGN5z9C5lM+0mLYXihrHbQ1onvYTr93aQ= go.einride.tech/aip v0.68.1/go.mod h1:XaFtaj4HuA3Zwk9xoBtTWgNubZ0ZZXv9BZJCkuKuWbg= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0= -go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= +go.etcd.io/bbolt v1.4.0 h1:TU77id3TnN/zKr7CO/uk+fBCwF2jGcMuw2B/FMAzYIk= +go.etcd.io/bbolt v1.4.0/go.mod h1:AsD+OCi/qPN1giOX1aiLAha3o1U8rAz65bvN4j0sRuk= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd/api/v3 v3.5.4 h1:OHVyt3TopwtUQ2GKdd5wu3PmmipR4FTwCqoEjSyRdIc= go.etcd.io/etcd/api/v3 v3.5.4/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= diff --git a/vendor/github.com/spf13/pflag/.editorconfig b/vendor/github.com/spf13/pflag/.editorconfig new file mode 100644 index 0000000000000..4492e9f9fe15b --- /dev/null +++ 
b/vendor/github.com/spf13/pflag/.editorconfig @@ -0,0 +1,12 @@ +root = true + +[*] +charset = utf-8 +end_of_line = lf +indent_size = 4 +indent_style = space +insert_final_newline = true +trim_trailing_whitespace = true + +[*.go] +indent_style = tab diff --git a/vendor/github.com/spf13/pflag/.golangci.yaml b/vendor/github.com/spf13/pflag/.golangci.yaml new file mode 100644 index 0000000000000..b274f248451b3 --- /dev/null +++ b/vendor/github.com/spf13/pflag/.golangci.yaml @@ -0,0 +1,4 @@ +linters: + disable-all: true + enable: + - nolintlint diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go index 24a5036e95b61..7c058de3744a1 100644 --- a/vendor/github.com/spf13/pflag/flag.go +++ b/vendor/github.com/spf13/pflag/flag.go @@ -160,7 +160,7 @@ type FlagSet struct { args []string // arguments after flags argsLenAtDash int // len(args) when a '--' was located when parsing, or -1 if no -- errorHandling ErrorHandling - output io.Writer // nil means stderr; use out() accessor + output io.Writer // nil means stderr; use Output() accessor interspersed bool // allow interspersed option/non-option args normalizeNameFunc func(f *FlagSet, name string) NormalizedName @@ -255,13 +255,20 @@ func (f *FlagSet) normalizeFlagName(name string) NormalizedName { return n(f, name) } -func (f *FlagSet) out() io.Writer { +// Output returns the destination for usage and error messages. os.Stderr is returned if +// output was not set or was set to nil. +func (f *FlagSet) Output() io.Writer { if f.output == nil { return os.Stderr } return f.output } +// Name returns the name of the flag set. +func (f *FlagSet) Name() string { + return f.name +} + // SetOutput sets the destination for usage and error messages. // If output is nil, os.Stderr is used. func (f *FlagSet) SetOutput(output io.Writer) { @@ -358,7 +365,7 @@ func (f *FlagSet) ShorthandLookup(name string) *Flag { } if len(name) > 1 { msg := fmt.Sprintf("can not look up shorthand which is more than one ASCII character: %q", name) - fmt.Fprintf(f.out(), msg) + fmt.Fprintf(f.Output(), msg) panic(msg) } c := name[0] @@ -482,7 +489,7 @@ func (f *FlagSet) Set(name, value string) error { } if flag.Deprecated != "" { - fmt.Fprintf(f.out(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated) + fmt.Fprintf(f.Output(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated) } return nil } @@ -523,7 +530,7 @@ func Set(name, value string) error { // otherwise, the default values of all defined flags in the set. func (f *FlagSet) PrintDefaults() { usages := f.FlagUsages() - fmt.Fprint(f.out(), usages) + fmt.Fprint(f.Output(), usages) } // defaultIsZeroValue returns true if the default value for this flag represents @@ -758,7 +765,7 @@ func PrintDefaults() { // defaultUsage is the default function to print a usage message. 
func defaultUsage(f *FlagSet) { - fmt.Fprintf(f.out(), "Usage of %s:\n", f.name) + fmt.Fprintf(f.Output(), "Usage of %s:\n", f.name) f.PrintDefaults() } @@ -844,7 +851,7 @@ func (f *FlagSet) AddFlag(flag *Flag) { _, alreadyThere := f.formal[normalizedFlagName] if alreadyThere { msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name) - fmt.Fprintln(f.out(), msg) + fmt.Fprintln(f.Output(), msg) panic(msg) // Happens only if flags are declared with identical names } if f.formal == nil { @@ -860,7 +867,7 @@ func (f *FlagSet) AddFlag(flag *Flag) { } if len(flag.Shorthand) > 1 { msg := fmt.Sprintf("%q shorthand is more than one ASCII character", flag.Shorthand) - fmt.Fprintf(f.out(), msg) + fmt.Fprintf(f.Output(), msg) panic(msg) } if f.shorthands == nil { @@ -870,7 +877,7 @@ func (f *FlagSet) AddFlag(flag *Flag) { used, alreadyThere := f.shorthands[c] if alreadyThere { msg := fmt.Sprintf("unable to redefine %q shorthand in %q flagset: it's already used for %q flag", c, f.name, used.Name) - fmt.Fprintf(f.out(), msg) + fmt.Fprintf(f.Output(), msg) panic(msg) } f.shorthands[c] = flag @@ -909,7 +916,7 @@ func VarP(value Value, name, shorthand, usage string) { func (f *FlagSet) failf(format string, a ...interface{}) error { err := fmt.Errorf(format, a...) if f.errorHandling != ContinueOnError { - fmt.Fprintln(f.out(), err) + fmt.Fprintln(f.Output(), err) f.usage() } return err @@ -1060,7 +1067,7 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse } if flag.ShorthandDeprecated != "" { - fmt.Fprintf(f.out(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated) + fmt.Fprintf(f.Output(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated) } err = fn(flag, value) diff --git a/vendor/github.com/spf13/pflag/ip.go b/vendor/github.com/spf13/pflag/ip.go index 3d414ba69fe1d..06b8bcb572159 100644 --- a/vendor/github.com/spf13/pflag/ip.go +++ b/vendor/github.com/spf13/pflag/ip.go @@ -16,6 +16,9 @@ func newIPValue(val net.IP, p *net.IP) *ipValue { func (i *ipValue) String() string { return net.IP(*i).String() } func (i *ipValue) Set(s string) error { + if s == "" { + return nil + } ip := net.ParseIP(strings.TrimSpace(s)) if ip == nil { return fmt.Errorf("failed to parse IP: %q", s) diff --git a/vendor/github.com/spf13/pflag/ipnet_slice.go b/vendor/github.com/spf13/pflag/ipnet_slice.go new file mode 100644 index 0000000000000..6b541aa8798cf --- /dev/null +++ b/vendor/github.com/spf13/pflag/ipnet_slice.go @@ -0,0 +1,147 @@ +package pflag + +import ( + "fmt" + "io" + "net" + "strings" +) + +// -- ipNetSlice Value +type ipNetSliceValue struct { + value *[]net.IPNet + changed bool +} + +func newIPNetSliceValue(val []net.IPNet, p *[]net.IPNet) *ipNetSliceValue { + ipnsv := new(ipNetSliceValue) + ipnsv.value = p + *ipnsv.value = val + return ipnsv +} + +// Set converts, and assigns, the comma-separated IPNet argument string representation as the []net.IPNet value of this flag. +// If Set is called on a flag that already has a []net.IPNet assigned, the newly converted values will be appended. 
+func (s *ipNetSliceValue) Set(val string) error { + + // remove all quote characters + rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "") + + // read flag arguments with CSV parser + ipNetStrSlice, err := readAsCSV(rmQuote.Replace(val)) + if err != nil && err != io.EOF { + return err + } + + // parse ip values into slice + out := make([]net.IPNet, 0, len(ipNetStrSlice)) + for _, ipNetStr := range ipNetStrSlice { + _, n, err := net.ParseCIDR(strings.TrimSpace(ipNetStr)) + if err != nil { + return fmt.Errorf("invalid string being converted to CIDR: %s", ipNetStr) + } + out = append(out, *n) + } + + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) + } + + s.changed = true + + return nil +} + +// Type returns a string that uniquely represents this flag's type. +func (s *ipNetSliceValue) Type() string { + return "ipNetSlice" +} + +// String defines a "native" format for this net.IPNet slice flag value. +func (s *ipNetSliceValue) String() string { + + ipNetStrSlice := make([]string, len(*s.value)) + for i, n := range *s.value { + ipNetStrSlice[i] = n.String() + } + + out, _ := writeAsCSV(ipNetStrSlice) + return "[" + out + "]" +} + +func ipNetSliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Emtpy string would cause a slice with one (empty) entry + if len(val) == 0 { + return []net.IPNet{}, nil + } + ss := strings.Split(val, ",") + out := make([]net.IPNet, len(ss)) + for i, sval := range ss { + _, n, err := net.ParseCIDR(strings.TrimSpace(sval)) + if err != nil { + return nil, fmt.Errorf("invalid string being converted to CIDR: %s", sval) + } + out[i] = *n + } + return out, nil +} + +// GetIPNetSlice returns the []net.IPNet value of a flag with the given name +func (f *FlagSet) GetIPNetSlice(name string) ([]net.IPNet, error) { + val, err := f.getFlagType(name, "ipNetSlice", ipNetSliceConv) + if err != nil { + return []net.IPNet{}, err + } + return val.([]net.IPNet), nil +} + +// IPNetSliceVar defines a ipNetSlice flag with specified name, default value, and usage string. +// The argument p points to a []net.IPNet variable in which to store the value of the flag. +func (f *FlagSet) IPNetSliceVar(p *[]net.IPNet, name string, value []net.IPNet, usage string) { + f.VarP(newIPNetSliceValue(value, p), name, "", usage) +} + +// IPNetSliceVarP is like IPNetSliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPNetSliceVarP(p *[]net.IPNet, name, shorthand string, value []net.IPNet, usage string) { + f.VarP(newIPNetSliceValue(value, p), name, shorthand, usage) +} + +// IPNetSliceVar defines a []net.IPNet flag with specified name, default value, and usage string. +// The argument p points to a []net.IPNet variable in which to store the value of the flag. +func IPNetSliceVar(p *[]net.IPNet, name string, value []net.IPNet, usage string) { + CommandLine.VarP(newIPNetSliceValue(value, p), name, "", usage) +} + +// IPNetSliceVarP is like IPNetSliceVar, but accepts a shorthand letter that can be used after a single dash. +func IPNetSliceVarP(p *[]net.IPNet, name, shorthand string, value []net.IPNet, usage string) { + CommandLine.VarP(newIPNetSliceValue(value, p), name, shorthand, usage) +} + +// IPNetSlice defines a []net.IPNet flag with specified name, default value, and usage string. +// The return value is the address of a []net.IPNet variable that stores the value of that flag. 
+func (f *FlagSet) IPNetSlice(name string, value []net.IPNet, usage string) *[]net.IPNet { + p := []net.IPNet{} + f.IPNetSliceVarP(&p, name, "", value, usage) + return &p +} + +// IPNetSliceP is like IPNetSlice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPNetSliceP(name, shorthand string, value []net.IPNet, usage string) *[]net.IPNet { + p := []net.IPNet{} + f.IPNetSliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// IPNetSlice defines a []net.IPNet flag with specified name, default value, and usage string. +// The return value is the address of a []net.IP variable that stores the value of the flag. +func IPNetSlice(name string, value []net.IPNet, usage string) *[]net.IPNet { + return CommandLine.IPNetSliceP(name, "", value, usage) +} + +// IPNetSliceP is like IPNetSlice, but accepts a shorthand letter that can be used after a single dash. +func IPNetSliceP(name, shorthand string, value []net.IPNet, usage string) *[]net.IPNet { + return CommandLine.IPNetSliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go index 4894af818023b..d1ff0a96ba0b5 100644 --- a/vendor/github.com/spf13/pflag/string_array.go +++ b/vendor/github.com/spf13/pflag/string_array.go @@ -31,11 +31,7 @@ func (s *stringArrayValue) Append(val string) error { func (s *stringArrayValue) Replace(val []string) error { out := make([]string, len(val)) for i, d := range val { - var err error out[i] = d - if err != nil { - return err - } } *s.value = out return nil diff --git a/vendor/go.etcd.io/bbolt/.gitignore b/vendor/go.etcd.io/bbolt/.gitignore index 9fa948ebf9d5f..ed4d259db23ff 100644 --- a/vendor/go.etcd.io/bbolt/.gitignore +++ b/vendor/go.etcd.io/bbolt/.gitignore @@ -6,5 +6,7 @@ cover.out cover-*.out /.idea *.iml +/bbolt /cmd/bbolt/bbolt +.DS_Store diff --git a/vendor/go.etcd.io/bbolt/.go-version b/vendor/go.etcd.io/bbolt/.go-version index 013173af5e9bc..d8c40e539ce6b 100644 --- a/vendor/go.etcd.io/bbolt/.go-version +++ b/vendor/go.etcd.io/bbolt/.go-version @@ -1 +1 @@ -1.22.6 +1.23.6 diff --git a/vendor/go.etcd.io/bbolt/Makefile b/vendor/go.etcd.io/bbolt/Makefile index 21407797416eb..f5a6703a0bb32 100644 --- a/vendor/go.etcd.io/bbolt/Makefile +++ b/vendor/go.etcd.io/bbolt/Makefile @@ -1,6 +1,7 @@ BRANCH=`git rev-parse --abbrev-ref HEAD` COMMIT=`git rev-parse --short HEAD` GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" +GOFILES = $(shell find . -name \*.go) TESTFLAGS_RACE=-race=false ifdef ENABLE_RACE @@ -13,9 +14,26 @@ ifdef CPU endif TESTFLAGS = $(TESTFLAGS_RACE) $(TESTFLAGS_CPU) $(EXTRA_TESTFLAGS) +TESTFLAGS_TIMEOUT=30m +ifdef TIMEOUT + TESTFLAGS_TIMEOUT=$(TIMEOUT) +endif + +TESTFLAGS_ENABLE_STRICT_MODE=false +ifdef ENABLE_STRICT_MODE + TESTFLAGS_ENABLE_STRICT_MODE=$(ENABLE_STRICT_MODE) +endif + +.EXPORT_ALL_VARIABLES: +TEST_ENABLE_STRICT_MODE=${TESTFLAGS_ENABLE_STRICT_MODE} + .PHONY: fmt fmt: - !(gofmt -l -s -d $(shell find . 
-name \*.go) | grep '[a-z]') + @echo "Verifying gofmt, failures can be fixed with ./scripts/fix.sh" + @!(gofmt -l -s -d ${GOFILES} | grep '[a-z]') + + @echo "Verifying goimports, failures can be fixed with ./scripts/fix.sh" + @!(go run golang.org/x/tools/cmd/goimports@latest -l -d ${GOFILES} | grep '[a-z]') .PHONY: lint lint: @@ -24,21 +42,23 @@ lint: .PHONY: test test: @echo "hashmap freelist test" - TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout 30m - TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./cmd/bbolt + BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout ${TESTFLAGS_TIMEOUT} + BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./internal/... + BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./cmd/bbolt @echo "array freelist test" - TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m - TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./cmd/bbolt + BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout ${TESTFLAGS_TIMEOUT} + BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./internal/... + BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./cmd/bbolt .PHONY: coverage coverage: @echo "hashmap freelist test" - TEST_FREELIST_TYPE=hashmap go test -v -timeout 30m \ + TEST_FREELIST_TYPE=hashmap go test -v -timeout ${TESTFLAGS_TIMEOUT} \ -coverprofile cover-freelist-hashmap.out -covermode atomic @echo "array freelist test" - TEST_FREELIST_TYPE=array go test -v -timeout 30m \ + TEST_FREELIST_TYPE=array go test -v -timeout ${TESTFLAGS_TIMEOUT} \ -coverprofile cover-freelist-array.out -covermode atomic BOLT_CMD=bbolt @@ -55,7 +75,7 @@ gofail-enable: install-gofail gofail enable . .PHONY: gofail-disable -gofail-disable: +gofail-disable: install-gofail gofail disable . .PHONY: install-gofail @@ -65,12 +85,24 @@ install-gofail: .PHONY: test-failpoint test-failpoint: @echo "[failpoint] hashmap freelist test" - TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint + BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint @echo "[failpoint] array freelist test" - TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint - -.PHONY: test-robustness # Running robustness tests requires root permission -test-robustness: - go test -v ${TESTFLAGS} ./tests/dmflakey -test.root - go test -v ${TESTFLAGS} ./tests/robustness -test.root + BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint + +.PHONY: test-robustness # Running robustness tests requires root permission for now +# TODO: Remove sudo once we fully migrate to the prow infrastructure +test-robustness: gofail-enable build + sudo env PATH=$$PATH go test -v ${TESTFLAGS} ./tests/dmflakey -test.root + sudo env PATH=$(PWD)/bin:$$PATH go test -v ${TESTFLAGS} ${ROBUSTNESS_TESTFLAGS} ./tests/robustness -test.root + +.PHONY: test-benchmark-compare +# Runs benchmark tests on the current git ref and the given REF, and compares +# the two. 
+test-benchmark-compare: install-benchstat + @git fetch + ./scripts/compare_benchmarks.sh $(REF) + +.PHONY: install-benchstat +install-benchstat: + go install golang.org/x/perf/cmd/benchstat@latest diff --git a/vendor/go.etcd.io/bbolt/OWNERS b/vendor/go.etcd.io/bbolt/OWNERS new file mode 100644 index 0000000000000..91f168a798085 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/OWNERS @@ -0,0 +1,10 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - ahrtr # Benjamin Wang + - serathius # Marek Siarkowicz + - ptabor # Piotr Tabor + - spzala # Sahdev Zala +reviewers: + - fuweid # Wei Fu + - tjungblu # Thomas Jungblut diff --git a/vendor/go.etcd.io/bbolt/README.md b/vendor/go.etcd.io/bbolt/README.md index 495a93ef8f38d..f365e51e3eb4d 100644 --- a/vendor/go.etcd.io/bbolt/README.md +++ b/vendor/go.etcd.io/bbolt/README.md @@ -1,10 +1,8 @@ bbolt ===== -[![Go Report Card](https://goreportcard.com/badge/github.com/etcd-io/bbolt?style=flat-square)](https://goreportcard.com/report/github.com/etcd-io/bbolt) -[![Coverage](https://codecov.io/gh/etcd-io/bbolt/branch/master/graph/badge.svg)](https://codecov.io/gh/etcd-io/bbolt) -[![Build Status Travis](https://img.shields.io/travis/etcd-io/bboltlabs.svg?style=flat-square&&branch=master)](https://travis-ci.com/etcd-io/bbolt) -[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/etcd-io/bbolt) +[![Go Report Card](https://goreportcard.com/badge/go.etcd.io/bbolt?style=flat-square)](https://goreportcard.com/report/go.etcd.io/bbolt) +[![Go Reference](https://pkg.go.dev/badge/go.etcd.io/bbolt.svg)](https://pkg.go.dev/go.etcd.io/bbolt) [![Releases](https://img.shields.io/github/release/etcd-io/bbolt/all.svg?style=flat-square)](https://github.com/etcd-io/bbolt/releases) [![LICENSE](https://img.shields.io/github/license/etcd-io/bbolt.svg?style=flat-square)](https://github.com/etcd-io/bbolt/blob/master/LICENSE) @@ -71,13 +69,14 @@ New minor versions may add additional features to the API. - [LMDB](#lmdb) - [Caveats & Limitations](#caveats--limitations) - [Reading the Source](#reading-the-source) + - [Known Issues](#known-issues) - [Other Projects Using Bolt](#other-projects-using-bolt) ## Getting Started ### Installing -To start using Bolt, install Go and run `go get`: +To start using `bbolt`, install Go and run `go get`: ```sh $ go get go.etcd.io/bbolt@latest ``` @@ -103,7 +102,7 @@ To use bbolt as an embedded key-value store, import as: ```go import bolt "go.etcd.io/bbolt" -db, err := bolt.Open(path, 0666, nil) +db, err := bolt.Open(path, 0600, nil) if err != nil { return err } @@ -298,6 +297,17 @@ db.Update(func(tx *bolt.Tx) error { }) ``` +You can retrieve an existing bucket using the `Tx.Bucket()` function: +```go +db.Update(func(tx *bolt.Tx) error { + b := tx.Bucket([]byte("MyBucket")) + if b == nil { + return errors.New("bucket does not exist") + } + return nil +}) +``` + You can also create a bucket only if it doesn't exist by using the `Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this function for all your top-level buckets after you open your database so you can @@ -305,6 +315,17 @@ guarantee that they exist for future transactions. To delete a bucket, simply call the `Tx.DeleteBucket()` function. 
+You can also iterate over all existing top-level buckets with `Tx.ForEach()`: + +```go +db.View(func(tx *bolt.Tx) error { + tx.ForEach(func(name []byte, b *bolt.Bucket) error { + fmt.Println(string(name)) + return nil + }) + return nil +}) +``` ### Using key/value pairs @@ -336,7 +357,17 @@ exists then it will return its byte slice value. If it doesn't exist then it will return `nil`. It's important to note that you can have a zero-length value set to a key which is different than the key not existing. -Use the `Bucket.Delete()` function to delete a key from the bucket. +Use the `Bucket.Delete()` function to delete a key from the bucket: + +```go +db.Update(func (tx *bolt.Tx) error { + b := tx.Bucket([]byte("MyBucket")) + err := b.Delete([]byte("answer")) + return err +}) +``` + +This will delete the key `answers` from the bucket `MyBucket`. Please note that values returned from `Get()` are only valid while the transaction is open. If you need to use a value outside of the transaction @@ -654,7 +685,7 @@ uses a shared lock to allow multiple processes to read from the database but it will block any processes from opening the database in read-write mode. ```go -db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true}) +db, err := bolt.Open("my.db", 0600, &bolt.Options{ReadOnly: true}) if err != nil { log.Fatal(err) } @@ -890,7 +921,7 @@ The best places to start are the main entry points into Bolt: - `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the arguments, a cursor is used to traverse the B+tree to the page and position - where they key & value will be written. Once the position is found, the bucket + where the key & value will be written. Once the position is found, the bucket materializes the underlying page and the page's parent pages into memory as "nodes". These nodes are where mutations occur during read-write transactions. These changes get flushed to disk during commit. @@ -919,6 +950,21 @@ The best places to start are the main entry points into Bolt: If you have additional notes that could be helpful for others, please submit them via pull request. +## Known Issues + +- bbolt might run into data corruption issue on Linux when the feature + [ext4: fast commit](https://lwn.net/Articles/842385/), which was introduced in + linux kernel version v5.10, is enabled. The fixes to the issue were included in + linux kernel version v5.17, please refer to links below, + + * [ext4: fast commit may miss tracking unwritten range during ftruncate](https://lore.kernel.org/linux-ext4/20211223032337.5198-3-yinxin.x@bytedance.com/) + * [ext4: fast commit may not fallback for ineligible commit](https://lore.kernel.org/lkml/202201091544.W5HHEXAp-lkp@intel.com/T/#ma0768815e4b5f671e9e451d578256ef9a76fe30e) + * [ext4 updates for 5.17](https://lore.kernel.org/lkml/YdyxjTFaLWif6BCM@mit.edu/) + + Please also refer to the discussion in https://github.com/etcd-io/bbolt/issues/562. + +- Writing a value with a length of 0 will always result in reading back an empty `[]byte{}` value. + Please refer to [issues/726#issuecomment-2061694802](https://github.com/etcd-io/bbolt/issues/726#issuecomment-2061694802). ## Other Projects Using Bolt @@ -934,13 +980,16 @@ Below is a list of public, open source projects that use Bolt: * [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files. * [BoltDB Viewer](https://github.com/zc310/rich_boltdb) - A BoltDB Viewer Can run on Windows、Linux、Android system. 
* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend. +* [bstore](https://github.com/mjl-/bstore) - Database library storing Go values, with referential/unique/nonzero constraints, indices, automatic schema management with struct tags, and a query API. * [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet. * [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining simple tx and key scans. +* [Buildkit](https://github.com/moby/buildkit) - concurrent, cache-efficient, and Dockerfile-agnostic builder toolkit * [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend. * [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. * [🌰 Chestnut](https://github.com/jrapoport/chestnut) - Chestnut is encrypted storage for Go. * [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware. +* [Containerd](https://github.com/containerd/containerd) - An open and reliable container runtime * [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb. * [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency. * [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems. @@ -964,6 +1013,7 @@ Below is a list of public, open source projects that use Bolt: * [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. * [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files. * [NATS](https://github.com/nats-io/nats-streaming-server) - NATS Streaming uses bbolt for message and metadata storage. +* [Portainer](https://github.com/portainer/portainer) - A lightweight service delivery platform for containerized applications that can be used to manage Docker, Swarm, Kubernetes and ACI environments. * [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system. * [Rain](https://github.com/cenkalti/rain) - BitTorrent client and library. * [reef-pi](https://github.com/reef-pi/reef-pi) - reef-pi is an award winning, modular, DIY reef tank controller using easy to learn electronics based on a Raspberry Pi. diff --git a/vendor/go.etcd.io/bbolt/bolt_unix_aix.go b/vendor/go.etcd.io/bbolt/bolt_aix.go similarity index 99% rename from vendor/go.etcd.io/bbolt/bolt_unix_aix.go rename to vendor/go.etcd.io/bbolt/bolt_aix.go index 6dea4294dc7db..4b424ed4c4e37 100644 --- a/vendor/go.etcd.io/bbolt/bolt_unix_aix.go +++ b/vendor/go.etcd.io/bbolt/bolt_aix.go @@ -1,5 +1,4 @@ //go:build aix -// +build aix package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_android.go b/vendor/go.etcd.io/bbolt/bolt_android.go new file mode 100644 index 0000000000000..11890f0d70592 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/bolt_android.go @@ -0,0 +1,90 @@ +package bbolt + +import ( + "fmt" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/unix" +) + +// flock acquires an advisory lock on a file descriptor. 
+func flock(db *DB, exclusive bool, timeout time.Duration) error { + var t time.Time + if timeout != 0 { + t = time.Now() + } + fd := db.file.Fd() + var lockType int16 + if exclusive { + lockType = syscall.F_WRLCK + } else { + lockType = syscall.F_RDLCK + } + for { + // Attempt to obtain an exclusive lock. + lock := syscall.Flock_t{Type: lockType} + err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock) + if err == nil { + return nil + } else if err != syscall.EAGAIN { + return err + } + + // If we timed out then return an error. + if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { + return ErrTimeout + } + + // Wait for a bit and try again. + time.Sleep(flockRetryTimeout) + } +} + +// funlock releases an advisory lock on a file descriptor. +func funlock(db *DB) error { + var lock syscall.Flock_t + lock.Start = 0 + lock.Len = 0 + lock.Type = syscall.F_UNLCK + lock.Whence = 0 + return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) +} + +// mmap memory maps a DB's data file. +func mmap(db *DB, sz int) error { + // Map the data file to memory. + b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) + if err != nil { + return err + } + + // Advise the kernel that the mmap is accessed randomly. + err = unix.Madvise(b, syscall.MADV_RANDOM) + if err != nil && err != syscall.ENOSYS { + // Ignore not implemented error in kernel because it still works. + return fmt.Errorf("madvise: %s", err) + } + + // Save the original byte slice and convert to a byte array pointer. + db.dataref = b + db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) + db.datasz = sz + return nil +} + +// munmap unmaps a DB's data file from memory. +func munmap(db *DB) error { + // Ignore the unmap if we have no mapped data. + if db.dataref == nil { + return nil + } + + // Unmap using the original byte slice. 
+ err := unix.Munmap(db.dataref) + db.dataref = nil + db.data = nil + db.datasz = 0 + return err +} diff --git a/vendor/go.etcd.io/bbolt/bolt_arm64.go b/vendor/go.etcd.io/bbolt/bolt_arm64.go index 447bc19733274..2c67ab10cd01e 100644 --- a/vendor/go.etcd.io/bbolt/bolt_arm64.go +++ b/vendor/go.etcd.io/bbolt/bolt_arm64.go @@ -1,5 +1,4 @@ //go:build arm64 -// +build arm64 package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_loong64.go b/vendor/go.etcd.io/bbolt/bolt_loong64.go index 31c17c1d07042..1ef2145c67c55 100644 --- a/vendor/go.etcd.io/bbolt/bolt_loong64.go +++ b/vendor/go.etcd.io/bbolt/bolt_loong64.go @@ -1,5 +1,4 @@ //go:build loong64 -// +build loong64 package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_mips64x.go b/vendor/go.etcd.io/bbolt/bolt_mips64x.go index a9385beb6824b..f28a0512a1da8 100644 --- a/vendor/go.etcd.io/bbolt/bolt_mips64x.go +++ b/vendor/go.etcd.io/bbolt/bolt_mips64x.go @@ -1,5 +1,4 @@ //go:build mips64 || mips64le -// +build mips64 mips64le package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_mipsx.go b/vendor/go.etcd.io/bbolt/bolt_mipsx.go index ed734ff7f3099..708fccdc010b5 100644 --- a/vendor/go.etcd.io/bbolt/bolt_mipsx.go +++ b/vendor/go.etcd.io/bbolt/bolt_mipsx.go @@ -1,5 +1,4 @@ //go:build mips || mipsle -// +build mips mipsle package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc.go b/vendor/go.etcd.io/bbolt/bolt_ppc.go index e403f57d8a87d..6a21cf33c7920 100644 --- a/vendor/go.etcd.io/bbolt/bolt_ppc.go +++ b/vendor/go.etcd.io/bbolt/bolt_ppc.go @@ -1,5 +1,4 @@ //go:build ppc -// +build ppc package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc64.go b/vendor/go.etcd.io/bbolt/bolt_ppc64.go index fcd86529f931d..a32f2462281f7 100644 --- a/vendor/go.etcd.io/bbolt/bolt_ppc64.go +++ b/vendor/go.etcd.io/bbolt/bolt_ppc64.go @@ -1,5 +1,4 @@ //go:build ppc64 -// +build ppc64 package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_ppc64le.go b/vendor/go.etcd.io/bbolt/bolt_ppc64le.go index 20234aca466a3..8fb60dddcb254 100644 --- a/vendor/go.etcd.io/bbolt/bolt_ppc64le.go +++ b/vendor/go.etcd.io/bbolt/bolt_ppc64le.go @@ -1,5 +1,4 @@ //go:build ppc64le -// +build ppc64le package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_riscv64.go b/vendor/go.etcd.io/bbolt/bolt_riscv64.go index 060f30c73cb95..a63d26ab21d46 100644 --- a/vendor/go.etcd.io/bbolt/bolt_riscv64.go +++ b/vendor/go.etcd.io/bbolt/bolt_riscv64.go @@ -1,5 +1,4 @@ //go:build riscv64 -// +build riscv64 package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_s390x.go b/vendor/go.etcd.io/bbolt/bolt_s390x.go index 92d2755adb4c3..749ea97e3a0f1 100644 --- a/vendor/go.etcd.io/bbolt/bolt_s390x.go +++ b/vendor/go.etcd.io/bbolt/bolt_s390x.go @@ -1,5 +1,4 @@ //go:build s390x -// +build s390x package bbolt diff --git a/vendor/go.etcd.io/bbolt/bolt_unix_solaris.go b/vendor/go.etcd.io/bbolt/bolt_solaris.go similarity index 100% rename from vendor/go.etcd.io/bbolt/bolt_unix_solaris.go rename to vendor/go.etcd.io/bbolt/bolt_solaris.go diff --git a/vendor/go.etcd.io/bbolt/bolt_unix.go b/vendor/go.etcd.io/bbolt/bolt_unix.go index 757ae4d1a484f..d1922c2d99d00 100644 --- a/vendor/go.etcd.io/bbolt/bolt_unix.go +++ b/vendor/go.etcd.io/bbolt/bolt_unix.go @@ -1,5 +1,4 @@ -//go:build !windows && !plan9 && !solaris && !aix -// +build !windows,!plan9,!solaris,!aix +//go:build !windows && !plan9 && !solaris && !aix && !android package bbolt @@ -10,6 +9,8 @@ import ( "unsafe" "golang.org/x/sys/unix" + + "go.etcd.io/bbolt/errors" ) // flock acquires an advisory lock on a file descriptor. 
@@ -36,7 +37,7 @@ func flock(db *DB, exclusive bool, timeout time.Duration) error { // If we timed out then return an error. if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { - return ErrTimeout + return errors.ErrTimeout } // Wait for a bit and try again. diff --git a/vendor/go.etcd.io/bbolt/bolt_windows.go b/vendor/go.etcd.io/bbolt/bolt_windows.go index e5dde27454f29..ec21ecb85c7d8 100644 --- a/vendor/go.etcd.io/bbolt/bolt_windows.go +++ b/vendor/go.etcd.io/bbolt/bolt_windows.go @@ -8,6 +8,8 @@ import ( "unsafe" "golang.org/x/sys/windows" + + "go.etcd.io/bbolt/errors" ) // fdatasync flushes written data to a file descriptor. @@ -42,7 +44,7 @@ func flock(db *DB, exclusive bool, timeout time.Duration) error { // If we timed out then return an error. if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout { - return ErrTimeout + return errors.ErrTimeout } // Wait for a bit and try again. @@ -70,7 +72,7 @@ func mmap(db *DB, sz int) error { return fmt.Errorf("truncate: %s", err) } sizehi = uint32(sz >> 32) - sizelo = uint32(sz) & 0xffffffff + sizelo = uint32(sz) } // Open a file mapping handle. @@ -93,7 +95,7 @@ func mmap(db *DB, sz int) error { } // Convert to a byte array. - db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr))) + db.data = (*[maxMapSize]byte)(unsafe.Pointer(addr)) db.datasz = sz return nil diff --git a/vendor/go.etcd.io/bbolt/boltsync_unix.go b/vendor/go.etcd.io/bbolt/boltsync_unix.go index 81e09a5310f85..27face752edc8 100644 --- a/vendor/go.etcd.io/bbolt/boltsync_unix.go +++ b/vendor/go.etcd.io/bbolt/boltsync_unix.go @@ -1,5 +1,4 @@ //go:build !windows && !plan9 && !linux && !openbsd -// +build !windows,!plan9,!linux,!openbsd package bbolt diff --git a/vendor/go.etcd.io/bbolt/bucket.go b/vendor/go.etcd.io/bbolt/bucket.go index f3533d3446b8d..6371ace972ce1 100644 --- a/vendor/go.etcd.io/bbolt/bucket.go +++ b/vendor/go.etcd.io/bbolt/bucket.go @@ -4,6 +4,9 @@ import ( "bytes" "fmt" "unsafe" + + "go.etcd.io/bbolt/errors" + "go.etcd.io/bbolt/internal/common" ) const ( @@ -14,8 +17,6 @@ const ( MaxValueSize = (1 << 31) - 2 ) -const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) - const ( minFillPercent = 0.1 maxFillPercent = 1.0 @@ -27,12 +28,12 @@ const DefaultFillPercent = 0.5 // Bucket represents a collection of key/value pairs inside the database. type Bucket struct { - *bucket - tx *Tx // the associated transaction - buckets map[string]*Bucket // subbucket cache - page *page // inline page reference - rootNode *node // materialized node for the root page. - nodes map[pgid]*node // node cache + *common.InBucket + tx *Tx // the associated transaction + buckets map[string]*Bucket // subbucket cache + page *common.Page // inline page reference + rootNode *node // materialized node for the root page. + nodes map[common.Pgid]*node // node cache // Sets the threshold for filling nodes when they split. By default, // the bucket will fill to 50% but it can be useful to increase this @@ -42,21 +43,12 @@ type Bucket struct { FillPercent float64 } -// bucket represents the on-file representation of a bucket. -// This is stored as the "value" of a bucket key. If the bucket is small enough, -// then its root page can be stored inline in the "value", after the bucket -// header. In the case of inline buckets, the "root" will be 0. -type bucket struct { - root pgid // page id of the bucket's root-level page - sequence uint64 // monotonically incrementing, used by NextSequence() -} - // newBucket returns a new bucket associated with a transaction.
func newBucket(tx *Tx) Bucket { var b = Bucket{tx: tx, FillPercent: DefaultFillPercent} if tx.writable { b.buckets = make(map[string]*Bucket) - b.nodes = make(map[pgid]*node) + b.nodes = make(map[common.Pgid]*node) } return b } @@ -67,8 +59,8 @@ func (b *Bucket) Tx() *Tx { } // Root returns the root of the bucket. -func (b *Bucket) Root() pgid { - return b.root +func (b *Bucket) Root() common.Pgid { + return b.RootPage() } // Writable returns whether the bucket is writable. @@ -105,7 +97,7 @@ func (b *Bucket) Bucket(name []byte) *Bucket { k, v, flags := c.seek(name) // Return nil if the key doesn't exist or it is not a bucket. - if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 { + if !bytes.Equal(name, k) || (flags&common.BucketLeafFlag) == 0 { return nil } @@ -125,8 +117,8 @@ func (b *Bucket) openBucket(value []byte) *Bucket { // Unaligned access requires a copy to be made. const unalignedMask = unsafe.Alignof(struct { - bucket - page + common.InBucket + common.Page }{}) - 1 unaligned := uintptr(unsafe.Pointer(&value[0]))&unalignedMask != 0 if unaligned { @@ -136,15 +128,15 @@ func (b *Bucket) openBucket(value []byte) *Bucket { // If this is a writable transaction then we need to copy the bucket entry. // Read-only transactions can point directly at the mmap entry. if b.tx.writable && !unaligned { - child.bucket = &bucket{} - *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) + child.InBucket = &common.InBucket{} + *child.InBucket = *(*common.InBucket)(unsafe.Pointer(&value[0])) } else { - child.bucket = (*bucket)(unsafe.Pointer(&value[0])) + child.InBucket = (*common.InBucket)(unsafe.Pointer(&value[0])) } // Save a reference to the inline page if the bucket is inline. - if child.root == 0 { - child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + if child.RootPage() == 0 { + child.page = (*common.Page)(unsafe.Pointer(&value[common.BucketHeaderSize])) } return &child @@ -153,13 +145,23 @@ func (b *Bucket) openBucket(value []byte) *Bucket { // CreateBucket creates a new bucket at the given key and returns the new bucket. // Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long. // The bucket instance is only valid for the lifetime of the transaction. -func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { +func (b *Bucket) CreateBucket(key []byte) (rb *Bucket, err error) { + if lg := b.tx.db.Logger(); lg != discardLogger { + lg.Debugf("Creating bucket %q", key) + defer func() { + if err != nil { + lg.Errorf("Creating bucket %q failed: %v", key, err) + } else { + lg.Debugf("Creating bucket %q successfully", key) + } + }() + } if b.tx.db == nil { - return nil, ErrTxClosed + return nil, errors.ErrTxClosed } else if !b.tx.writable { - return nil, ErrTxNotWritable + return nil, errors.ErrTxNotWritable } else if len(key) == 0 { - return nil, ErrBucketNameRequired + return nil, errors.ErrBucketNameRequired } // Insert into node. @@ -173,21 +175,21 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { // Return an error if there is an existing key. if bytes.Equal(newKey, k) { - if (flags & bucketLeafFlag) != 0 { - return nil, ErrBucketExists + if (flags & common.BucketLeafFlag) != 0 { + return nil, errors.ErrBucketExists } - return nil, ErrIncompatibleValue + return nil, errors.ErrIncompatibleValue } // Create empty, inline bucket. 
var bucket = Bucket{ - bucket: &bucket{}, + InBucket: &common.InBucket{}, rootNode: &node{isLeaf: true}, FillPercent: DefaultFillPercent, } var value = bucket.write() - c.node().put(newKey, newKey, value, 0, bucketLeafFlag) + c.node().put(newKey, newKey, value, 0, common.BucketLeafFlag) // Since subbuckets are not allowed on inline buckets, we need to // dereference the inline page, if it exists. This will cause the bucket @@ -200,39 +202,108 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { // CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it. // Returns an error if the bucket name is blank, or if the bucket name is too long. // The bucket instance is only valid for the lifetime of the transaction. -func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { - child, err := b.CreateBucket(key) - if err == ErrBucketExists { - return b.Bucket(key), nil - } else if err != nil { - return nil, err +func (b *Bucket) CreateBucketIfNotExists(key []byte) (rb *Bucket, err error) { + if lg := b.tx.db.Logger(); lg != discardLogger { + lg.Debugf("Creating bucket if not exist %q", key) + defer func() { + if err != nil { + lg.Errorf("Creating bucket if not exist %q failed: %v", key, err) + } else { + lg.Debugf("Creating bucket if not exist %q successfully", key) + } + }() + } + + if b.tx.db == nil { + return nil, errors.ErrTxClosed + } else if !b.tx.writable { + return nil, errors.ErrTxNotWritable + } else if len(key) == 0 { + return nil, errors.ErrBucketNameRequired + } + + // Insert into node. + // Tip: Use a new variable `newKey` instead of reusing the existing `key` to prevent + // it from being marked as leaking, and accordingly cannot be allocated on stack. + newKey := cloneBytes(key) + + if b.buckets != nil { + if child := b.buckets[string(newKey)]; child != nil { + return child, nil + } + } + + // Move cursor to correct position. + c := b.Cursor() + k, v, flags := c.seek(newKey) + + // Return an error if there is an existing non-bucket key. + if bytes.Equal(newKey, k) { + if (flags & common.BucketLeafFlag) != 0 { + var child = b.openBucket(v) + if b.buckets != nil { + b.buckets[string(newKey)] = child + } + + return child, nil + } + return nil, errors.ErrIncompatibleValue + } + + // Create empty, inline bucket. + var bucket = Bucket{ + InBucket: &common.InBucket{}, + rootNode: &node{isLeaf: true}, + FillPercent: DefaultFillPercent, } - return child, nil + var value = bucket.write() + + c.node().put(newKey, newKey, value, 0, common.BucketLeafFlag) + + // Since subbuckets are not allowed on inline buckets, we need to + // dereference the inline page, if it exists. This will cause the bucket + // to be treated as a regular, non-inline bucket for the rest of the tx. + b.page = nil + + return b.Bucket(newKey), nil } // DeleteBucket deletes a bucket at the given key. // Returns an error if the bucket does not exist, or if the key represents a non-bucket value. 
-func (b *Bucket) DeleteBucket(key []byte) error { +func (b *Bucket) DeleteBucket(key []byte) (err error) { + if lg := b.tx.db.Logger(); lg != discardLogger { + lg.Debugf("Deleting bucket %q", key) + defer func() { + if err != nil { + lg.Errorf("Deleting bucket %q failed: %v", key, err) + } else { + lg.Debugf("Deleting bucket %q successfully", key) + } + }() + } + if b.tx.db == nil { - return ErrTxClosed + return errors.ErrTxClosed } else if !b.Writable() { - return ErrTxNotWritable + return errors.ErrTxNotWritable } + newKey := cloneBytes(key) + // Move cursor to correct position. c := b.Cursor() - k, _, flags := c.seek(key) + k, _, flags := c.seek(newKey) // Return an error if bucket doesn't exist or is not a bucket. - if !bytes.Equal(key, k) { - return ErrBucketNotFound - } else if (flags & bucketLeafFlag) == 0 { - return ErrIncompatibleValue + if !bytes.Equal(newKey, k) { + return errors.ErrBucketNotFound + } else if (flags & common.BucketLeafFlag) == 0 { + return errors.ErrIncompatibleValue } // Recursively delete all child buckets. - child := b.Bucket(key) - err := child.ForEachBucket(func(k []byte) error { + child := b.Bucket(newKey) + err = child.ForEachBucket(func(k []byte) error { if err := child.DeleteBucket(k); err != nil { return fmt.Errorf("delete bucket: %s", err) } @@ -243,7 +314,7 @@ func (b *Bucket) DeleteBucket(key []byte) error { } // Remove cached copy. - delete(b.buckets, string(key)) + delete(b.buckets, string(newKey)) // Release all bucket pages to freelist. child.nodes = nil @@ -251,19 +322,119 @@ func (b *Bucket) DeleteBucket(key []byte) error { child.free() // Delete the node if we have a matching key. - c.node().del(key) + c.node().del(newKey) return nil } +// MoveBucket moves a sub-bucket from the source bucket to the destination bucket. +// Returns an error if +// 1. the sub-bucket cannot be found in the source bucket; +// 2. or the key already exists in the destination bucket; +// 3. or the key represents a non-bucket value; +// 4. the source and destination buckets are the same. +func (b *Bucket) MoveBucket(key []byte, dstBucket *Bucket) (err error) { + lg := b.tx.db.Logger() + if lg != discardLogger { + lg.Debugf("Moving bucket %q", key) + defer func() { + if err != nil { + lg.Errorf("Moving bucket %q failed: %v", key, err) + } else { + lg.Debugf("Moving bucket %q successfully", key) + } + }() + } + + if b.tx.db == nil || dstBucket.tx.db == nil { + return errors.ErrTxClosed + } else if !b.Writable() || !dstBucket.Writable() { + return errors.ErrTxNotWritable + } + + if b.tx.db.Path() != dstBucket.tx.db.Path() || b.tx != dstBucket.tx { + lg.Errorf("The source and target buckets are not in the same db file, source bucket in %s and target bucket in %s", b.tx.db.Path(), dstBucket.tx.db.Path()) + return errors.ErrDifferentDB + } + + newKey := cloneBytes(key) + + // Move cursor to correct position. + c := b.Cursor() + k, v, flags := c.seek(newKey) + + // Return an error if bucket doesn't exist or is not a bucket. + if !bytes.Equal(newKey, k) { + return errors.ErrBucketNotFound + } else if (flags & common.BucketLeafFlag) == 0 { + lg.Errorf("An incompatible key %s exists in the source bucket", newKey) + return errors.ErrIncompatibleValue + } + + // Do nothing (return true directly) if the source bucket and the + // destination bucket are actually the same bucket. 
+ if b == dstBucket || (b.RootPage() == dstBucket.RootPage() && b.RootPage() != 0) { + lg.Errorf("The source bucket (%s) and the target bucket (%s) are the same bucket", b, dstBucket) + return errors.ErrSameBuckets + } + + // check whether the key already exists in the destination bucket + curDst := dstBucket.Cursor() + k, _, flags = curDst.seek(newKey) + + // Return an error if there is an existing key in the destination bucket. + if bytes.Equal(newKey, k) { + if (flags & common.BucketLeafFlag) != 0 { + return errors.ErrBucketExists + } + lg.Errorf("An incompatible key %s exists in the target bucket", newKey) + return errors.ErrIncompatibleValue + } + + // remove the sub-bucket from the source bucket + delete(b.buckets, string(newKey)) + c.node().del(newKey) + + // add te sub-bucket to the destination bucket + newValue := cloneBytes(v) + curDst.node().put(newKey, newKey, newValue, 0, common.BucketLeafFlag) + + return nil +} + +// Inspect returns the structure of the bucket. +func (b *Bucket) Inspect() BucketStructure { + return b.recursivelyInspect([]byte("root")) +} + +func (b *Bucket) recursivelyInspect(name []byte) BucketStructure { + bs := BucketStructure{Name: string(name)} + + keyN := 0 + c := b.Cursor() + for k, _, flags := c.first(); k != nil; k, _, flags = c.next() { + if flags&common.BucketLeafFlag != 0 { + childBucket := b.Bucket(k) + childBS := childBucket.recursivelyInspect(k) + bs.Children = append(bs.Children, childBS) + } else { + keyN++ + } + } + bs.KeyN = keyN + + return bs +} + // Get retrieves the value for a key in the bucket. // Returns a nil value if the key does not exist or if the key is a nested bucket. // The returned value is only valid for the life of the transaction. +// The returned memory is owned by bbolt and must never be modified; writing to this memory might corrupt the database. func (b *Bucket) Get(key []byte) []byte { k, v, flags := b.Cursor().seek(key) // Return nil if this is a bucket. - if (flags & bucketLeafFlag) != 0 { + if (flags & common.BucketLeafFlag) != 0 { return nil } @@ -278,17 +449,27 @@ func (b *Bucket) Get(key []byte) []byte { // If the key exist then its previous value will be overwritten. // Supplied value must remain valid for the life of the transaction. // Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large. -func (b *Bucket) Put(key []byte, value []byte) error { +func (b *Bucket) Put(key []byte, value []byte) (err error) { + if lg := b.tx.db.Logger(); lg != discardLogger { + lg.Debugf("Putting key %q", key) + defer func() { + if err != nil { + lg.Errorf("Putting key %q failed: %v", key, err) + } else { + lg.Debugf("Putting key %q successfully", key) + } + }() + } if b.tx.db == nil { - return ErrTxClosed + return errors.ErrTxClosed } else if !b.Writable() { - return ErrTxNotWritable + return errors.ErrTxNotWritable } else if len(key) == 0 { - return ErrKeyRequired + return errors.ErrKeyRequired } else if len(key) > MaxKeySize { - return ErrKeyTooLarge + return errors.ErrKeyTooLarge } else if int64(len(value)) > MaxValueSize { - return ErrValueTooLarge + return errors.ErrValueTooLarge } // Insert into node. @@ -301,8 +482,8 @@ func (b *Bucket) Put(key []byte, value []byte) error { k, _, flags := c.seek(newKey) // Return an error if there is an existing key with a bucket value. 
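`MoveBucket` is a new API introduced by this vendored bbolt version (defined in the hunk above); the sketch below shows one plausible way to call it. Bucket names are hypothetical, and both source and destination must belong to the same writable transaction in the same database file.

```go
package main

import (
	bolt "go.etcd.io/bbolt"
)

// moveBatch moves the sub-bucket named key from "staging" to "production".
// Bucket names are illustrative; MoveBucket requires both buckets to live
// in the same writable transaction (and the same database file).
func moveBatch(db *bolt.DB, key []byte) error {
	return db.Update(func(tx *bolt.Tx) error {
		src := tx.Bucket([]byte("staging"))
		dst := tx.Bucket([]byte("production"))
		if src == nil || dst == nil {
			return bolt.ErrBucketNotFound
		}
		// Fails with ErrBucketExists if dst already holds a sub-bucket with
		// this key, and with ErrIncompatibleValue if the key holds a plain value.
		return src.MoveBucket(key, dst)
	})
}
```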
- if bytes.Equal(newKey, k) && (flags&bucketLeafFlag) != 0 { - return ErrIncompatibleValue + if bytes.Equal(newKey, k) && (flags&common.BucketLeafFlag) != 0 { + return errors.ErrIncompatibleValue } // gofail: var beforeBucketPut struct{} @@ -315,11 +496,22 @@ func (b *Bucket) Put(key []byte, value []byte) error { // Delete removes a key from the bucket. // If the key does not exist then nothing is done and a nil error is returned. // Returns an error if the bucket was created from a read-only transaction. -func (b *Bucket) Delete(key []byte) error { +func (b *Bucket) Delete(key []byte) (err error) { + if lg := b.tx.db.Logger(); lg != discardLogger { + lg.Debugf("Deleting key %q", key) + defer func() { + if err != nil { + lg.Errorf("Deleting key %q failed: %v", key, err) + } else { + lg.Debugf("Deleting key %q successfully", key) + } + }() + } + if b.tx.db == nil { - return ErrTxClosed + return errors.ErrTxClosed } else if !b.Writable() { - return ErrTxNotWritable + return errors.ErrTxNotWritable } // Move cursor to correct position. @@ -332,8 +524,8 @@ func (b *Bucket) Delete(key []byte) error { } // Return an error if there is already existing bucket value. - if (flags & bucketLeafFlag) != 0 { - return ErrIncompatibleValue + if (flags & common.BucketLeafFlag) != 0 { + return errors.ErrIncompatibleValue } // Delete the node if we have a matching key. @@ -343,44 +535,46 @@ func (b *Bucket) Delete(key []byte) error { } // Sequence returns the current integer for the bucket without incrementing it. -func (b *Bucket) Sequence() uint64 { return b.bucket.sequence } +func (b *Bucket) Sequence() uint64 { + return b.InSequence() +} // SetSequence updates the sequence number for the bucket. func (b *Bucket) SetSequence(v uint64) error { if b.tx.db == nil { - return ErrTxClosed + return errors.ErrTxClosed } else if !b.Writable() { - return ErrTxNotWritable + return errors.ErrTxNotWritable } // Materialize the root node if it hasn't been already so that the // bucket will be saved during commit. if b.rootNode == nil { - _ = b.node(b.root, nil) + _ = b.node(b.RootPage(), nil) } // Set the sequence. - b.bucket.sequence = v + b.SetInSequence(v) return nil } // NextSequence returns an autoincrementing integer for the bucket. func (b *Bucket) NextSequence() (uint64, error) { if b.tx.db == nil { - return 0, ErrTxClosed + return 0, errors.ErrTxClosed } else if !b.Writable() { - return 0, ErrTxNotWritable + return 0, errors.ErrTxNotWritable } // Materialize the root node if it hasn't been already so that the // bucket will be saved during commit. if b.rootNode == nil { - _ = b.node(b.root, nil) + _ = b.node(b.RootPage(), nil) } // Increment and return the sequence. - b.bucket.sequence++ - return b.bucket.sequence, nil + b.IncSequence() + return b.Sequence(), nil } // ForEach executes a function for each key/value pair in a bucket. @@ -390,7 +584,7 @@ func (b *Bucket) NextSequence() (uint64, error) { // the bucket; this will result in undefined behavior. 
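The `Put`, `Delete`, and sequence hunks above only change error plumbing and logging; the externally visible behaviour is unchanged. As a reference point, a minimal sketch of the usual write/read flow is shown below — database path and bucket name are illustrative. Note that, per the updated `Get` documentation, the returned slice is owned by bbolt and must not be modified or retained past the transaction.

```go
package main

import (
	"encoding/binary"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// Path and bucket name are illustrative.
	db, err := bolt.Open("events.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("events"))
		if err != nil {
			return err
		}
		// NextSequence yields an auto-incrementing uint64, handy as a key.
		seq, err := b.NextSequence()
		if err != nil {
			return err
		}
		key := make([]byte, 8)
		binary.BigEndian.PutUint64(key, seq)
		return b.Put(key, []byte("payload"))
	})
	if err != nil {
		log.Fatal(err)
	}

	// Reads can use a read-only transaction; bytes returned by Get/ForEach are
	// owned by bbolt and only valid for the life of the transaction.
	err = db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("events"))
		return b.ForEach(func(k, v []byte) error {
			log.Printf("key=%x value=%s", k, v)
			return nil
		})
	})
	if err != nil {
		log.Fatal(err)
	}
}
```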
func (b *Bucket) ForEach(fn func(k, v []byte) error) error { if b.tx.db == nil { - return ErrTxClosed + return errors.ErrTxClosed } c := b.Cursor() for k, v := c.First(); k != nil; k, v = c.Next() { @@ -403,11 +597,11 @@ func (b *Bucket) ForEach(fn func(k, v []byte) error) error { func (b *Bucket) ForEachBucket(fn func(k []byte) error) error { if b.tx.db == nil { - return ErrTxClosed + return errors.ErrTxClosed } c := b.Cursor() for k, _, flags := c.first(); k != nil; k, _, flags = c.next() { - if flags&bucketLeafFlag != 0 { + if flags&common.BucketLeafFlag != 0 { if err := fn(k); err != nil { return err } @@ -421,64 +615,64 @@ func (b *Bucket) Stats() BucketStats { var s, subStats BucketStats pageSize := b.tx.db.pageSize s.BucketN += 1 - if b.root == 0 { + if b.RootPage() == 0 { s.InlineBucketN += 1 } - b.forEachPage(func(p *page, depth int, pgstack []pgid) { - if (p.flags & leafPageFlag) != 0 { - s.KeyN += int(p.count) + b.forEachPage(func(p *common.Page, depth int, pgstack []common.Pgid) { + if p.IsLeafPage() { + s.KeyN += int(p.Count()) // used totals the used bytes for the page - used := pageHeaderSize + used := common.PageHeaderSize - if p.count != 0 { + if p.Count() != 0 { // If page has any elements, add all element headers. - used += leafPageElementSize * uintptr(p.count-1) + used += common.LeafPageElementSize * uintptr(p.Count()-1) // Add all element key, value sizes. // The computation takes advantage of the fact that the position // of the last element's key/value equals to the total of the sizes // of all previous elements' keys and values. // It also includes the last element's header. - lastElement := p.leafPageElement(p.count - 1) - used += uintptr(lastElement.pos + lastElement.ksize + lastElement.vsize) + lastElement := p.LeafPageElement(p.Count() - 1) + used += uintptr(lastElement.Pos() + lastElement.Ksize() + lastElement.Vsize()) } - if b.root == 0 { + if b.RootPage() == 0 { // For inlined bucket just update the inline stats s.InlineBucketInuse += int(used) } else { // For non-inlined bucket update all the leaf stats s.LeafPageN++ s.LeafInuse += int(used) - s.LeafOverflowN += int(p.overflow) + s.LeafOverflowN += int(p.Overflow()) // Collect stats from sub-buckets. // Do that by iterating over all element headers // looking for the ones with the bucketLeafFlag. - for i := uint16(0); i < p.count; i++ { - e := p.leafPageElement(i) - if (e.flags & bucketLeafFlag) != 0 { + for i := uint16(0); i < p.Count(); i++ { + e := p.LeafPageElement(i) + if (e.Flags() & common.BucketLeafFlag) != 0 { // For any bucket element, open the element value // and recursively call Stats on the contained bucket. - subStats.Add(b.openBucket(e.value()).Stats()) + subStats.Add(b.openBucket(e.Value()).Stats()) } } } - } else if (p.flags & branchPageFlag) != 0 { + } else if p.IsBranchPage() { s.BranchPageN++ - lastElement := p.branchPageElement(p.count - 1) + lastElement := p.BranchPageElement(p.Count() - 1) // used totals the used bytes for the page // Add header and all element headers. - used := pageHeaderSize + (branchPageElementSize * uintptr(p.count-1)) + used := common.PageHeaderSize + (common.BranchPageElementSize * uintptr(p.Count()-1)) // Add size of all keys and values. // Again, use the fact that last element's position equals to // the total of key, value sizes of all previous elements. 
- used += uintptr(lastElement.pos + lastElement.ksize) + used += uintptr(lastElement.Pos() + lastElement.Ksize()) s.BranchInuse += int(used) - s.BranchOverflowN += int(p.overflow) + s.BranchOverflowN += int(p.Overflow()) } // Keep track of maximum page depth. @@ -499,29 +693,29 @@ func (b *Bucket) Stats() BucketStats { } // forEachPage iterates over every page in a bucket, including inline pages. -func (b *Bucket) forEachPage(fn func(*page, int, []pgid)) { +func (b *Bucket) forEachPage(fn func(*common.Page, int, []common.Pgid)) { // If we have an inline page then just use that. if b.page != nil { - fn(b.page, 0, []pgid{b.root}) + fn(b.page, 0, []common.Pgid{b.RootPage()}) return } // Otherwise traverse the page hierarchy. - b.tx.forEachPage(b.root, fn) + b.tx.forEachPage(b.RootPage(), fn) } // forEachPageNode iterates over every page (or node) in a bucket. // This also includes inline pages. -func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) { +func (b *Bucket) forEachPageNode(fn func(*common.Page, *node, int)) { // If we have an inline page or root node then just use that. if b.page != nil { fn(b.page, nil, 0) return } - b._forEachPageNode(b.root, 0, fn) + b._forEachPageNode(b.RootPage(), 0, fn) } -func (b *Bucket) _forEachPageNode(pgId pgid, depth int, fn func(*page, *node, int)) { +func (b *Bucket) _forEachPageNode(pgId common.Pgid, depth int, fn func(*common.Page, *node, int)) { var p, n = b.pageNode(pgId) // Execute function. @@ -529,16 +723,16 @@ func (b *Bucket) _forEachPageNode(pgId pgid, depth int, fn func(*page, *node, in // Recursively loop over children. if p != nil { - if (p.flags & branchPageFlag) != 0 { - for i := 0; i < int(p.count); i++ { - elem := p.branchPageElement(uint16(i)) - b._forEachPageNode(elem.pgid, depth+1, fn) + if p.IsBranchPage() { + for i := 0; i < int(p.Count()); i++ { + elem := p.BranchPageElement(uint16(i)) + b._forEachPageNode(elem.Pgid(), depth+1, fn) } } } else { if !n.isLeaf { for _, inode := range n.inodes { - b._forEachPageNode(inode.pgid, depth+1, fn) + b._forEachPageNode(inode.Pgid(), depth+1, fn) } } } @@ -561,9 +755,9 @@ func (b *Bucket) spill() error { } // Update the child bucket header in this bucket. - value = make([]byte, unsafe.Sizeof(bucket{})) - var bucket = (*bucket)(unsafe.Pointer(&value[0])) - *bucket = *child.bucket + value = make([]byte, unsafe.Sizeof(common.InBucket{})) + var bucket = (*common.InBucket)(unsafe.Pointer(&value[0])) + *bucket = *child.InBucket } // Skip writing the bucket if there are no materialized nodes. @@ -577,10 +771,10 @@ func (b *Bucket) spill() error { if !bytes.Equal([]byte(name), k) { panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k)) } - if flags&bucketLeafFlag == 0 { + if flags&common.BucketLeafFlag == 0 { panic(fmt.Sprintf("unexpected bucket header flag: %x", flags)) } - c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag) + c.node().put([]byte(name), []byte(name), value, 0, common.BucketLeafFlag) } // Ignore if there's not a materialized root node. @@ -595,16 +789,16 @@ func (b *Bucket) spill() error { b.rootNode = b.rootNode.root() // Update the root node for this bucket. 
- if b.rootNode.pgid >= b.tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)) + if b.rootNode.pgid >= b.tx.meta.Pgid() { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.Pgid())) } - b.root = b.rootNode.pgid + b.SetRootPage(b.rootNode.pgid) return nil } // inlineable returns true if a bucket is small enough to be written inline -// and if it contains no subbuckets. Otherwise returns false. +// and if it contains no subbuckets. Otherwise, returns false. func (b *Bucket) inlineable() bool { var n = b.rootNode @@ -615,11 +809,11 @@ func (b *Bucket) inlineable() bool { // Bucket is not inlineable if it contains subbuckets or if it goes beyond // our threshold for inline bucket size. - var size = pageHeaderSize + var size = common.PageHeaderSize for _, inode := range n.inodes { - size += leafPageElementSize + uintptr(len(inode.key)) + uintptr(len(inode.value)) + size += common.LeafPageElementSize + uintptr(len(inode.Key())) + uintptr(len(inode.Value())) - if inode.flags&bucketLeafFlag != 0 { + if inode.Flags()&common.BucketLeafFlag != 0 { return false } else if size > b.maxInlineBucketSize() { return false @@ -638,14 +832,14 @@ func (b *Bucket) maxInlineBucketSize() uintptr { func (b *Bucket) write() []byte { // Allocate the appropriate size. var n = b.rootNode - var value = make([]byte, bucketHeaderSize+n.size()) + var value = make([]byte, common.BucketHeaderSize+n.size()) // Write a bucket header. - var bucket = (*bucket)(unsafe.Pointer(&value[0])) - *bucket = *b.bucket + var bucket = (*common.InBucket)(unsafe.Pointer(&value[0])) + *bucket = *b.InBucket // Convert byte slice to a fake page and write the root node. - var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) + var p = (*common.Page)(unsafe.Pointer(&value[common.BucketHeaderSize])) n.write(p) return value @@ -662,8 +856,8 @@ func (b *Bucket) rebalance() { } // node creates a node from a page and associates it with a given parent. -func (b *Bucket) node(pgId pgid, parent *node) *node { - _assert(b.nodes != nil, "nodes map expected") +func (b *Bucket) node(pgId common.Pgid, parent *node) *node { + common.Assert(b.nodes != nil, "nodes map expected") // Retrieve node if it's already been created. if n := b.nodes[pgId]; n != nil { @@ -682,6 +876,12 @@ func (b *Bucket) node(pgId pgid, parent *node) *node { var p = b.page if p == nil { p = b.tx.page(pgId) + } else { + // if p isn't nil, then it's an inline bucket. + // The pgId must be 0 in this case. + common.Verify(func() { + common.Assert(pgId == 0, "The page ID (%d) isn't 0 for an inline bucket", pgId) + }) } // Read the page into the node and cache it. @@ -696,19 +896,19 @@ func (b *Bucket) node(pgId pgid, parent *node) *node { // free recursively frees all pages in the bucket. func (b *Bucket) free() { - if b.root == 0 { + if b.RootPage() == 0 { return } var tx = b.tx - b.forEachPageNode(func(p *page, n *node, _ int) { + b.forEachPageNode(func(p *common.Page, n *node, _ int) { if p != nil { - tx.db.freelist.free(tx.meta.txid, p) + tx.db.freelist.Free(tx.meta.Txid(), p) } else { n.free() } }) - b.root = 0 + b.SetRootPage(0) } // dereference removes all references to the old mmap. @@ -723,11 +923,11 @@ func (b *Bucket) dereference() { } // pageNode returns the in-memory node, if it exists. -// Otherwise returns the underlying page. -func (b *Bucket) pageNode(id pgid) (*page, *node) { +// Otherwise, returns the underlying page. 
+func (b *Bucket) pageNode(id common.Pgid) (*common.Page, *node) { // Inline buckets have a fake page embedded in their value so treat them // differently. We'll return the rootNode (if available) or the fake page. - if b.root == 0 { + if b.RootPage() == 0 { if id != 0 { panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id)) } @@ -797,3 +997,9 @@ func cloneBytes(v []byte) []byte { copy(clone, v) return clone } + +type BucketStructure struct { + Name string `json:"name"` // name of the bucket + KeyN int `json:"keyN"` // number of key/value pairs + Children []BucketStructure `json:"buckets,omitempty"` // child buckets +} diff --git a/vendor/go.etcd.io/bbolt/cursor.go b/vendor/go.etcd.io/bbolt/cursor.go index bbfd92a9bc1ee..0c1e28c106ff2 100644 --- a/vendor/go.etcd.io/bbolt/cursor.go +++ b/vendor/go.etcd.io/bbolt/cursor.go @@ -4,6 +4,9 @@ import ( "bytes" "fmt" "sort" + + "go.etcd.io/bbolt/errors" + "go.etcd.io/bbolt/internal/common" ) // Cursor represents an iterator that can traverse over all key/value pairs in a bucket @@ -30,9 +33,9 @@ func (c *Cursor) Bucket() *Bucket { // If the bucket is empty then a nil key and value are returned. // The returned key and value are only valid for the life of the transaction. func (c *Cursor) First() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") + common.Assert(c.bucket.tx.db != nil, "tx closed") k, v, flags := c.first() - if (flags & uint32(bucketLeafFlag)) != 0 { + if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil } return k, v @@ -40,7 +43,7 @@ func (c *Cursor) First() (key []byte, value []byte) { func (c *Cursor) first() (key []byte, value []byte, flags uint32) { c.stack = c.stack[:0] - p, n := c.bucket.pageNode(c.bucket.root) + p, n := c.bucket.pageNode(c.bucket.RootPage()) c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) c.goToFirstElementOnTheStack() @@ -51,7 +54,7 @@ func (c *Cursor) first() (key []byte, value []byte, flags uint32) { } k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { + if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil, flags } return k, v, flags @@ -61,9 +64,9 @@ func (c *Cursor) first() (key []byte, value []byte, flags uint32) { // If the bucket is empty then a nil key and value are returned. // The returned key and value are only valid for the life of the transaction. func (c *Cursor) Last() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") + common.Assert(c.bucket.tx.db != nil, "tx closed") c.stack = c.stack[:0] - p, n := c.bucket.pageNode(c.bucket.root) + p, n := c.bucket.pageNode(c.bucket.RootPage()) ref := elemRef{page: p, node: n} ref.index = ref.count() - 1 c.stack = append(c.stack, ref) @@ -80,7 +83,7 @@ func (c *Cursor) Last() (key []byte, value []byte) { } k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { + if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil } return k, v @@ -90,9 +93,9 @@ func (c *Cursor) Last() (key []byte, value []byte) { // If the cursor is at the end of the bucket then a nil key and value are returned. // The returned key and value are only valid for the life of the transaction. 
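The `BucketStructure` type added above (together with `Inspect` from the earlier bucket.go hunk) exposes the nested-bucket layout with JSON tags, so it can be marshalled directly. A hedged sketch, assuming a top-level bucket name supplied by the caller:

```go
package main

import (
	"encoding/json"
	"fmt"

	bolt "go.etcd.io/bbolt"
)

// dumpStructure prints the nested-bucket layout of a top-level bucket as JSON.
// The bucket name is caller-supplied; Inspect and BucketStructure are the
// additions shown in the bucket.go hunks of this patch.
func dumpStructure(db *bolt.DB, name []byte) error {
	return db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket(name)
		if b == nil {
			return bolt.ErrBucketNotFound
		}
		bs := b.Inspect() // BucketStructure{Name, KeyN, Children}
		out, err := json.MarshalIndent(bs, "", "  ")
		if err != nil {
			return err
		}
		fmt.Println(string(out))
		return nil
	})
}
```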
func (c *Cursor) Next() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") + common.Assert(c.bucket.tx.db != nil, "tx closed") k, v, flags := c.next() - if (flags & uint32(bucketLeafFlag)) != 0 { + if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil } return k, v @@ -102,9 +105,9 @@ func (c *Cursor) Next() (key []byte, value []byte) { // If the cursor is at the beginning of the bucket then a nil key and value are returned. // The returned key and value are only valid for the life of the transaction. func (c *Cursor) Prev() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") + common.Assert(c.bucket.tx.db != nil, "tx closed") k, v, flags := c.prev() - if (flags & uint32(bucketLeafFlag)) != 0 { + if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil } return k, v @@ -115,7 +118,7 @@ func (c *Cursor) Prev() (key []byte, value []byte) { // follow, a nil key is returned. // The returned key and value are only valid for the life of the transaction. func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") + common.Assert(c.bucket.tx.db != nil, "tx closed") k, v, flags := c.seek(seek) @@ -126,7 +129,7 @@ func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { if k == nil { return nil, nil - } else if (flags & uint32(bucketLeafFlag)) != 0 { + } else if (flags & uint32(common.BucketLeafFlag)) != 0 { return k, nil } return k, v @@ -136,15 +139,15 @@ func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { // Delete fails if current key/value is a bucket or if the transaction is not writable. func (c *Cursor) Delete() error { if c.bucket.tx.db == nil { - return ErrTxClosed + return errors.ErrTxClosed } else if !c.bucket.Writable() { - return ErrTxNotWritable + return errors.ErrTxNotWritable } key, _, flags := c.keyValue() // Return an error if current value is a bucket. - if (flags & bucketLeafFlag) != 0 { - return ErrIncompatibleValue + if (flags & common.BucketLeafFlag) != 0 { + return errors.ErrIncompatibleValue } c.node().del(key) @@ -156,7 +159,7 @@ func (c *Cursor) Delete() error { func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { // Start from root page/node and traverse to correct page. c.stack = c.stack[:0] - c.search(seek, c.bucket.root) + c.search(seek, c.bucket.RootPage()) // If this is a bucket then return a nil value. return c.keyValue() @@ -172,11 +175,11 @@ func (c *Cursor) goToFirstElementOnTheStack() { } // Keep adding pages pointing to the first element to the stack. - var pgId pgid + var pgId common.Pgid if ref.node != nil { - pgId = ref.node.inodes[ref.index].pgid + pgId = ref.node.inodes[ref.index].Pgid() } else { - pgId = ref.page.branchPageElement(uint16(ref.index)).pgid + pgId = ref.page.BranchPageElement(uint16(ref.index)).Pgid() } p, n := c.bucket.pageNode(pgId) c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) @@ -193,11 +196,11 @@ func (c *Cursor) last() { } // Keep adding pages pointing to the last element in the stack. 
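The cursor changes above are internal renames (`common.BucketLeafFlag`, `common.Pgid`, the `errors` package); the public `First`/`Last`/`Seek`/`Next`/`Prev`/`Delete` API is untouched. For orientation, the standard prefix-scan idiom built on that API is sketched below — bucket name and prefix are illustrative.

```go
package main

import (
	"bytes"
	"fmt"

	bolt "go.etcd.io/bbolt"
)

// scanPrefix iterates all keys in the "index" bucket that start with prefix.
// Seek positions the cursor at the first key >= prefix; Next walks forward
// in key order until the prefix no longer matches.
func scanPrefix(db *bolt.DB, prefix []byte) error {
	return db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("index"))
		if b == nil {
			return nil // nothing to scan
		}
		c := b.Cursor()
		for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
			fmt.Printf("%s = %s\n", k, v)
		}
		return nil
	})
}
```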
- var pgId pgid + var pgId common.Pgid if ref.node != nil { - pgId = ref.node.inodes[ref.index].pgid + pgId = ref.node.inodes[ref.index].Pgid() } else { - pgId = ref.page.branchPageElement(uint16(ref.index)).pgid + pgId = ref.page.BranchPageElement(uint16(ref.index)).Pgid() } p, n := c.bucket.pageNode(pgId) @@ -277,10 +280,10 @@ func (c *Cursor) prev() (key []byte, value []byte, flags uint32) { } // search recursively performs a binary search against a given page/node until it finds a given key. -func (c *Cursor) search(key []byte, pgId pgid) { +func (c *Cursor) search(key []byte, pgId common.Pgid) { p, n := c.bucket.pageNode(pgId) - if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { - panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) + if p != nil && !p.IsBranchPage() && !p.IsLeafPage() { + panic(fmt.Sprintf("invalid page type: %d: %x", p.Id(), p.Flags())) } e := elemRef{page: p, node: n} c.stack = append(c.stack, e) @@ -303,7 +306,7 @@ func (c *Cursor) searchNode(key []byte, n *node) { index := sort.Search(len(n.inodes), func(i int) bool { // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. // sort.Search() finds the lowest index where f() != -1 but we need the highest index. - ret := bytes.Compare(n.inodes[i].key, key) + ret := bytes.Compare(n.inodes[i].Key(), key) if ret == 0 { exact = true } @@ -315,18 +318,18 @@ func (c *Cursor) searchNode(key []byte, n *node) { c.stack[len(c.stack)-1].index = index // Recursively search to the next page. - c.search(key, n.inodes[index].pgid) + c.search(key, n.inodes[index].Pgid()) } -func (c *Cursor) searchPage(key []byte, p *page) { +func (c *Cursor) searchPage(key []byte, p *common.Page) { // Binary search for the correct range. - inodes := p.branchPageElements() + inodes := p.BranchPageElements() var exact bool - index := sort.Search(int(p.count), func(i int) bool { + index := sort.Search(int(p.Count()), func(i int) bool { // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. // sort.Search() finds the lowest index where f() != -1 but we need the highest index. - ret := bytes.Compare(inodes[i].key(), key) + ret := bytes.Compare(inodes[i].Key(), key) if ret == 0 { exact = true } @@ -338,7 +341,7 @@ func (c *Cursor) searchPage(key []byte, p *page) { c.stack[len(c.stack)-1].index = index // Recursively search to the next page. - c.search(key, inodes[index].pgid) + c.search(key, inodes[index].Pgid()) } // nsearch searches the leaf node on the top of the stack for a key. @@ -349,16 +352,16 @@ func (c *Cursor) nsearch(key []byte) { // If we have a node then search its inodes. if n != nil { index := sort.Search(len(n.inodes), func(i int) bool { - return bytes.Compare(n.inodes[i].key, key) != -1 + return bytes.Compare(n.inodes[i].Key(), key) != -1 }) e.index = index return } // If we have a page then search its leaf elements. - inodes := p.leafPageElements() - index := sort.Search(int(p.count), func(i int) bool { - return bytes.Compare(inodes[i].key(), key) != -1 + inodes := p.LeafPageElements() + index := sort.Search(int(p.Count()), func(i int) bool { + return bytes.Compare(inodes[i].Key(), key) != -1 }) e.index = index } @@ -375,17 +378,17 @@ func (c *Cursor) keyValue() ([]byte, []byte, uint32) { // Retrieve value from node. if ref.node != nil { inode := &ref.node.inodes[ref.index] - return inode.key, inode.value, inode.flags + return inode.Key(), inode.Value(), inode.Flags() } // Or retrieve value from page. 
- elem := ref.page.leafPageElement(uint16(ref.index)) - return elem.key(), elem.value(), elem.flags + elem := ref.page.LeafPageElement(uint16(ref.index)) + return elem.Key(), elem.Value(), elem.Flags() } // node returns the node that the cursor is currently positioned on. func (c *Cursor) node() *node { - _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") + common.Assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") // If the top of the stack is a leaf node then just return it. if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() { @@ -395,19 +398,19 @@ func (c *Cursor) node() *node { // Start from root and traverse down the hierarchy. var n = c.stack[0].node if n == nil { - n = c.bucket.node(c.stack[0].page.id, nil) + n = c.bucket.node(c.stack[0].page.Id(), nil) } for _, ref := range c.stack[:len(c.stack)-1] { - _assert(!n.isLeaf, "expected branch node") + common.Assert(!n.isLeaf, "expected branch node") n = n.childAt(ref.index) } - _assert(n.isLeaf, "expected leaf node") + common.Assert(n.isLeaf, "expected leaf node") return n } // elemRef represents a reference to an element on a given page/node. type elemRef struct { - page *page + page *common.Page node *node index int } @@ -417,7 +420,7 @@ func (r *elemRef) isLeaf() bool { if r.node != nil { return r.node.isLeaf } - return (r.page.flags & leafPageFlag) != 0 + return r.page.IsLeafPage() } // count returns the number of inodes or page elements. @@ -425,5 +428,5 @@ func (r *elemRef) count() int { if r.node != nil { return len(r.node.inodes) } - return int(r.page.count) + return int(r.page.Count()) } diff --git a/vendor/go.etcd.io/bbolt/db.go b/vendor/go.etcd.io/bbolt/db.go index 822798e41a5bf..5c1947e998413 100644 --- a/vendor/go.etcd.io/bbolt/db.go +++ b/vendor/go.etcd.io/bbolt/db.go @@ -3,49 +3,28 @@ package bbolt import ( "errors" "fmt" - "hash/fnv" "io" "os" "runtime" - "sort" "sync" "time" "unsafe" -) - -// The largest step that can be taken when remapping the mmap. -const maxMmapStep = 1 << 30 // 1GB - -// The data file format version. -const version = 2 - -// Represents a marker value to indicate that a file is a Bolt DB. -const magic uint32 = 0xED0CDAED - -const pgidNoFreelist pgid = 0xffffffffffffffff - -// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when -// syncing changes to a file. This is required as some operating systems, -// such as OpenBSD, do not have a unified buffer cache (UBC) and writes -// must be synchronized using the msync(2) syscall. -const IgnoreNoSync = runtime.GOOS == "openbsd" -// Default values if not set in a DB instance. -const ( - DefaultMaxBatchSize int = 1000 - DefaultMaxBatchDelay = 10 * time.Millisecond - DefaultAllocSize = 16 * 1024 * 1024 + berrors "go.etcd.io/bbolt/errors" + "go.etcd.io/bbolt/internal/common" + fl "go.etcd.io/bbolt/internal/freelist" ) -// default page size for db is set to the OS page size. -var defaultPageSize = os.Getpagesize() - // The time elapsed between consecutive file locking attempts. const flockRetryTimeout = 50 * time.Millisecond // FreelistType is the type of the freelist backend type FreelistType string +// TODO(ahrtr): eventually we should (step by step) +// 1. default to `FreelistMapType`; +// 2. 
remove the `FreelistArrayType`, do not export `FreelistMapType` +// and remove field `FreelistType' from both `DB` and `Options`; const ( // FreelistArrayType indicates backend freelist type is array FreelistArrayType = FreelistType("array") @@ -137,6 +116,8 @@ type DB struct { // Supported only on Unix via mlock/munlock syscalls. Mlock bool + logger Logger + path string openFile func(string, int, os.FileMode) (*os.File, error) file *os.File @@ -146,15 +127,14 @@ type DB struct { dataref []byte // mmap'ed readonly, write throws SEGV data *[maxMapSize]byte datasz int - filesz int // current on disk file size - meta0 *meta - meta1 *meta + meta0 *common.Meta + meta1 *common.Meta pageSize int opened bool rwtx *Tx txs []*Tx - freelist *freelist + freelist fl.Interface freelistLoad sync.Once pagePool sync.Pool @@ -191,13 +171,15 @@ func (db *DB) String() string { return fmt.Sprintf("DB<%q>", db.path) } -// Open creates and opens a database at the given path. -// If the file does not exist then it will be created automatically. +// Open creates and opens a database at the given path with a given file mode. +// If the file does not exist then it will be created automatically with a given file mode. // Passing in nil options will cause Bolt to open the database with the default options. -func Open(path string, mode os.FileMode, options *Options) (*DB, error) { - db := &DB{ +// Note: For read/write transactions, ensure the owner has write permission on the created/opened database file, e.g. 0600 +func Open(path string, mode os.FileMode, options *Options) (db *DB, err error) { + db = &DB{ opened: true, } + // Set default options if no options are provided. if options == nil { options = DefaultOptions @@ -211,9 +193,27 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { db.Mlock = options.Mlock // Set default values for later DB operations. - db.MaxBatchSize = DefaultMaxBatchSize - db.MaxBatchDelay = DefaultMaxBatchDelay - db.AllocSize = DefaultAllocSize + db.MaxBatchSize = common.DefaultMaxBatchSize + db.MaxBatchDelay = common.DefaultMaxBatchDelay + db.AllocSize = common.DefaultAllocSize + + if options.Logger == nil { + db.logger = getDiscardLogger() + } else { + db.logger = options.Logger + } + + lg := db.Logger() + if lg != discardLogger { + lg.Infof("Opening db file (%s) with mode %s and with options: %s", path, mode, options) + defer func() { + if err != nil { + lg.Errorf("Opening bbolt db (%s) failed: %v", path, err) + } else { + lg.Infof("Opening bbolt db (%s) successfully", path) + } + }() + } flag := os.O_RDWR if options.ReadOnly { @@ -222,6 +222,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { } else { // always load free pages in write mode db.PreLoadFreelist = true + flag |= os.O_CREATE } db.openFile = options.OpenFile @@ -230,9 +231,9 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { } // Open data file and separate sync handler for metadata writes. - var err error - if db.file, err = db.openFile(path, flag|os.O_CREATE, mode); err != nil { + if db.file, err = db.openFile(path, flag, mode); err != nil { _ = db.close() + lg.Errorf("failed to open db file (%s): %v", path, err) return nil, err } db.path = db.file.Name() @@ -244,8 +245,9 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { // if !options.ReadOnly. // The database file is locked using the shared lock (more than one process may // hold a lock at the same time) otherwise (options.ReadOnly is set). 
- if err := flock(db, !db.readOnly, options.Timeout); err != nil { + if err = flock(db, !db.readOnly, options.Timeout); err != nil { _ = db.close() + lg.Errorf("failed to lock db file (%s), readonly: %t, error: %v", path, db.readOnly, err) return nil, err } @@ -254,27 +256,28 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { if db.pageSize = options.PageSize; db.pageSize == 0 { // Set the default page size to the OS page size. - db.pageSize = defaultPageSize + db.pageSize = common.DefaultPageSize } // Initialize the database if it doesn't exist. - if info, err := db.file.Stat(); err != nil { + if info, statErr := db.file.Stat(); statErr != nil { _ = db.close() - return nil, err + lg.Errorf("failed to get db file's stats (%s): %v", path, err) + return nil, statErr } else if info.Size() == 0 { // Initialize new files with meta pages. - if err := db.init(); err != nil { + if err = db.init(); err != nil { // clean up file descriptor on initialization fail _ = db.close() + lg.Errorf("failed to initialize db file (%s): %v", path, err) return nil, err } } else { // try to get the page size from the metadata pages - if pgSize, err := db.getPageSize(); err == nil { - db.pageSize = pgSize - } else { + if db.pageSize, err = db.getPageSize(); err != nil { _ = db.close() - return nil, ErrInvalid + lg.Errorf("failed to get page size from db file (%s): %v", path, err) + return nil, err } } @@ -286,8 +289,9 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { } // Memory map the data file. - if err := db.mmap(options.InitialMmapSize); err != nil { + if err = db.mmap(options.InitialMmapSize); err != nil { _ = db.close() + lg.Errorf("failed to map db file (%s): %v", path, err) return nil, err } @@ -302,13 +306,14 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { // Flush freelist when transitioning from no sync to sync so // NoFreelistSync unaware boltdb can open the db later. 
if !db.NoFreelistSync && !db.hasSyncedFreelist() { - tx, err := db.Begin(true) + tx, txErr := db.Begin(true) if tx != nil { - err = tx.Commit() + txErr = tx.Commit() } - if err != nil { + if txErr != nil { + lg.Errorf("starting readwrite transaction failed: %v", txErr) _ = db.close() - return nil, err + return nil, txErr } } @@ -352,7 +357,7 @@ func (db *DB) getPageSize() (int, error) { return db.pageSize, nil } - return 0, ErrInvalid + return 0, berrors.ErrInvalid } // getPageSizeFromFirstMeta reads the pageSize from the first meta page @@ -361,11 +366,11 @@ func (db *DB) getPageSizeFromFirstMeta() (int, bool, error) { var metaCanRead bool if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) { metaCanRead = true - if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil { - return int(m.pageSize), metaCanRead, nil + if m := db.pageInBuffer(buf[:], 0).Meta(); m.Validate() == nil { + return int(m.PageSize()), metaCanRead, nil } } - return 0, metaCanRead, ErrInvalid + return 0, metaCanRead, berrors.ErrInvalid } // getPageSizeFromSecondMeta reads the pageSize from the second meta page @@ -397,13 +402,13 @@ func (db *DB) getPageSizeFromSecondMeta() (int, bool, error) { bw, err := db.file.ReadAt(buf[:], pos) if (err == nil && bw == len(buf)) || (err == io.EOF && int64(bw) == (fileSize-pos)) { metaCanRead = true - if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil { - return int(m.pageSize), metaCanRead, nil + if m := db.pageInBuffer(buf[:], 0).Meta(); m.Validate() == nil { + return int(m.PageSize()), metaCanRead, nil } } } - return 0, metaCanRead, ErrInvalid + return 0, metaCanRead, berrors.ErrInvalid } // loadFreelist reads the freelist if it is synced, or reconstructs it @@ -414,17 +419,29 @@ func (db *DB) loadFreelist() { db.freelist = newFreelist(db.FreelistType) if !db.hasSyncedFreelist() { // Reconstruct free list by scanning the DB. - db.freelist.readIDs(db.freepages()) + db.freelist.Init(db.freepages()) } else { // Read free list from freelist page. - db.freelist.read(db.page(db.meta().freelist)) + db.freelist.Read(db.page(db.meta().Freelist())) } - db.stats.FreePageN = db.freelist.free_count() + db.stats.FreePageN = db.freelist.FreeCount() }) } func (db *DB) hasSyncedFreelist() bool { - return db.meta().freelist != pgidNoFreelist + return db.meta().Freelist() != common.PgidNoFreelist +} + +func (db *DB) fileSize() (int, error) { + info, err := db.file.Stat() + if err != nil { + return 0, fmt.Errorf("file stat error: %w", err) + } + sz := int(info.Size()) + if sz < db.pageSize*2 { + return 0, fmt.Errorf("file size too small %d", sz) + } + return sz, nil } // mmap opens the underlying memory-mapped file and initializes the meta references. @@ -433,21 +450,22 @@ func (db *DB) mmap(minsz int) (err error) { db.mmaplock.Lock() defer db.mmaplock.Unlock() - info, err := db.file.Stat() - if err != nil { - return fmt.Errorf("mmap stat error: %s", err) - } else if int(info.Size()) < db.pageSize*2 { - return fmt.Errorf("file size too small") - } + lg := db.Logger() // Ensure the size is at least the minimum size. 
- fileSize := int(info.Size()) + var fileSize int + fileSize, err = db.fileSize() + if err != nil { + lg.Errorf("getting file size failed: %w", err) + return err + } var size = fileSize if size < minsz { size = minsz } size, err = db.mmapSize(size) if err != nil { + lg.Errorf("getting map size failed: %w", err) return err } @@ -472,6 +490,7 @@ func (db *DB) mmap(minsz int) (err error) { // gofail: var mapError string // return errors.New(mapError) if err = mmap(db, size); err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] mmap failed, size: %d, error: %v", runtime.GOOS, runtime.GOARCH, size, err) return err } @@ -493,15 +512,16 @@ func (db *DB) mmap(minsz int) (err error) { } // Save references to the meta pages. - db.meta0 = db.page(0).meta() - db.meta1 = db.page(1).meta() + db.meta0 = db.page(0).Meta() + db.meta1 = db.page(1).Meta() // Validate the meta pages. We only return an error if both meta pages fail // validation, since meta0 failing validation means that it wasn't saved // properly -- but we can recover using meta1. And vice-versa. - err0 := db.meta0.validate() - err1 := db.meta1.validate() + err0 := db.meta0.Validate() + err1 := db.meta1.Validate() if err0 != nil && err1 != nil { + lg.Errorf("both meta pages are invalid, meta0: %v, meta1: %v", err0, err1) return err0 } @@ -524,6 +544,7 @@ func (db *DB) munmap() error { // gofail: var unmapError string // return errors.New(unmapError) if err := munmap(db); err != nil { + db.Logger().Errorf("[GOOS: %s, GOARCH: %s] munmap failed, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, db.datasz, err) return fmt.Errorf("unmap error: %v", err.Error()) } @@ -543,13 +564,13 @@ func (db *DB) mmapSize(size int) (int, error) { // Verify the requested size is not above the maximum allowed. if size > maxMapSize { - return 0, fmt.Errorf("mmap too large") + return 0, errors.New("mmap too large") } // If larger than 1GB then grow by 1GB at a time. sz := int64(size) - if remainder := sz % int64(maxMmapStep); remainder > 0 { - sz += int64(maxMmapStep) - remainder + if remainder := sz % int64(common.MaxMmapStep); remainder > 0 { + sz += int64(common.MaxMmapStep) - remainder } // Ensure that the mmap size is a multiple of the page size. @@ -571,6 +592,7 @@ func (db *DB) munlock(fileSize int) error { // gofail: var munlockError string // return errors.New(munlockError) if err := munlock(db, fileSize); err != nil { + db.Logger().Errorf("[GOOS: %s, GOARCH: %s] munlock failed, fileSize: %d, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, fileSize, db.datasz, err) return fmt.Errorf("munlock error: %v", err.Error()) } return nil @@ -580,6 +602,7 @@ func (db *DB) mlock(fileSize int) error { // gofail: var mlockError string // return errors.New(mlockError) if err := mlock(db, fileSize); err != nil { + db.Logger().Errorf("[GOOS: %s, GOARCH: %s] mlock failed, fileSize: %d, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, fileSize, db.datasz, err) return fmt.Errorf("mlock error: %v", err.Error()) } return nil @@ -600,42 +623,43 @@ func (db *DB) init() error { // Create two meta pages on a buffer. buf := make([]byte, db.pageSize*4) for i := 0; i < 2; i++ { - p := db.pageInBuffer(buf, pgid(i)) - p.id = pgid(i) - p.flags = metaPageFlag + p := db.pageInBuffer(buf, common.Pgid(i)) + p.SetId(common.Pgid(i)) + p.SetFlags(common.MetaPageFlag) // Initialize the meta page. 
- m := p.meta() - m.magic = magic - m.version = version - m.pageSize = uint32(db.pageSize) - m.freelist = 2 - m.root = bucket{root: 3} - m.pgid = 4 - m.txid = txid(i) - m.checksum = m.sum64() + m := p.Meta() + m.SetMagic(common.Magic) + m.SetVersion(common.Version) + m.SetPageSize(uint32(db.pageSize)) + m.SetFreelist(2) + m.SetRootBucket(common.NewInBucket(3, 0)) + m.SetPgid(4) + m.SetTxid(common.Txid(i)) + m.SetChecksum(m.Sum64()) } // Write an empty freelist at page 3. - p := db.pageInBuffer(buf, pgid(2)) - p.id = pgid(2) - p.flags = freelistPageFlag - p.count = 0 + p := db.pageInBuffer(buf, common.Pgid(2)) + p.SetId(2) + p.SetFlags(common.FreelistPageFlag) + p.SetCount(0) // Write an empty leaf page at page 4. - p = db.pageInBuffer(buf, pgid(3)) - p.id = pgid(3) - p.flags = leafPageFlag - p.count = 0 + p = db.pageInBuffer(buf, common.Pgid(3)) + p.SetId(3) + p.SetFlags(common.LeafPageFlag) + p.SetCount(0) // Write the buffer to our data file. if _, err := db.ops.writeAt(buf, 0); err != nil { + db.Logger().Errorf("writeAt failed: %w", err) return err } if err := fdatasync(db); err != nil { + db.Logger().Errorf("[GOOS: %s, GOARCH: %s] fdatasync failed: %w", runtime.GOOS, runtime.GOARCH, err) return err } - db.filesz = len(buf) return nil } @@ -716,13 +740,31 @@ func (db *DB) close() error { // // IMPORTANT: You must close read-only transactions after you are finished or // else the database will not reclaim old pages. -func (db *DB) Begin(writable bool) (*Tx, error) { +func (db *DB) Begin(writable bool) (t *Tx, err error) { + if lg := db.Logger(); lg != discardLogger { + lg.Debugf("Starting a new transaction [writable: %t]", writable) + defer func() { + if err != nil { + lg.Errorf("Starting a new transaction [writable: %t] failed: %v", writable, err) + } else { + lg.Debugf("Starting a new transaction [writable: %t] successfully", writable) + } + }() + } + if writable { return db.beginRWTx() } return db.beginTx() } +func (db *DB) Logger() Logger { + if db == nil || db.logger == nil { + return getDiscardLogger() + } + return db.logger +} + func (db *DB) beginTx() (*Tx, error) { // Lock the meta pages while we initialize the transaction. We obtain // the meta lock before the mmap lock because that's the order that the @@ -738,14 +780,14 @@ func (db *DB) beginTx() (*Tx, error) { if !db.opened { db.mmaplock.RUnlock() db.metalock.Unlock() - return nil, ErrDatabaseNotOpen + return nil, berrors.ErrDatabaseNotOpen } // Exit if the database is not correctly mapped. if db.data == nil { db.mmaplock.RUnlock() db.metalock.Unlock() - return nil, ErrInvalidMapping + return nil, berrors.ErrInvalidMapping } // Create a transaction associated with the database. @@ -755,6 +797,9 @@ func (db *DB) beginTx() (*Tx, error) { // Keep track of transaction until it closes. db.txs = append(db.txs, t) n := len(db.txs) + if db.freelist != nil { + db.freelist.AddReadonlyTXID(t.meta.Txid()) + } // Unlock the meta pages. db.metalock.Unlock() @@ -771,7 +816,7 @@ func (db *DB) beginTx() (*Tx, error) { func (db *DB) beginRWTx() (*Tx, error) { // If the database was opened with Options.ReadOnly, return an error. if db.readOnly { - return nil, ErrDatabaseReadOnly + return nil, berrors.ErrDatabaseReadOnly } // Obtain writer lock. This is released by the transaction when it closes. @@ -786,49 +831,23 @@ func (db *DB) beginRWTx() (*Tx, error) { // Exit if the database is not open yet. 
if !db.opened { db.rwlock.Unlock() - return nil, ErrDatabaseNotOpen + return nil, berrors.ErrDatabaseNotOpen } // Exit if the database is not correctly mapped. if db.data == nil { db.rwlock.Unlock() - return nil, ErrInvalidMapping + return nil, berrors.ErrInvalidMapping } // Create a transaction associated with the database. t := &Tx{writable: true} t.init(db) db.rwtx = t - db.freePages() + db.freelist.ReleasePendingPages() return t, nil } -// freePages releases any pages associated with closed read-only transactions. -func (db *DB) freePages() { - // Free all pending pages prior to earliest open transaction. - sort.Sort(txsById(db.txs)) - minid := txid(0xFFFFFFFFFFFFFFFF) - if len(db.txs) > 0 { - minid = db.txs[0].meta.txid - } - if minid > 0 { - db.freelist.release(minid - 1) - } - // Release unused txid extents. - for _, t := range db.txs { - db.freelist.releaseRange(minid, t.meta.txid-1) - minid = t.meta.txid + 1 - } - db.freelist.releaseRange(minid, txid(0xFFFFFFFFFFFFFFFF)) - // Any page both allocated and freed in an extent is safe to release. -} - -type txsById []*Tx - -func (t txsById) Len() int { return len(t) } -func (t txsById) Swap(i, j int) { t[i], t[j] = t[j], t[i] } -func (t txsById) Less(i, j int) bool { return t[i].meta.txid < t[j].meta.txid } - // removeTx removes a transaction from the database. func (db *DB) removeTx(tx *Tx) { // Release the read lock on the mmap. @@ -848,6 +867,9 @@ func (db *DB) removeTx(tx *Tx) { } } n := len(db.txs) + if db.freelist != nil { + db.freelist.RemoveReadonlyTXID(tx.meta.Txid()) + } // Unlock the meta pages. db.metalock.Unlock() @@ -1056,7 +1078,20 @@ func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { // // This is not necessary under normal operation, however, if you use NoSync // then it allows you to force the database file to sync against the disk. -func (db *DB) Sync() error { return fdatasync(db) } +func (db *DB) Sync() (err error) { + if lg := db.Logger(); lg != discardLogger { + lg.Debug("Syncing bbolt db (%s)", db.path) + defer func() { + if err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] syncing bbolt db (%s) failed: %v", runtime.GOOS, runtime.GOARCH, db.path, err) + } else { + lg.Debugf("Syncing bbolt db (%s) successfully", db.path) + } + }() + } + + return fdatasync(db) +} // Stats retrieves ongoing performance stats for the database. // This is only updated when a transaction closes. @@ -1069,37 +1104,37 @@ func (db *DB) Stats() Stats { // This is for internal access to the raw data bytes from the C cursor, use // carefully, or not at all. func (db *DB) Info() *Info { - _assert(db.data != nil, "database file isn't correctly mapped") + common.Assert(db.data != nil, "database file isn't correctly mapped") return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize} } // page retrieves a page reference from the mmap based on the current page size. -func (db *DB) page(id pgid) *page { - pos := id * pgid(db.pageSize) - return (*page)(unsafe.Pointer(&db.data[pos])) +func (db *DB) page(id common.Pgid) *common.Page { + pos := id * common.Pgid(db.pageSize) + return (*common.Page)(unsafe.Pointer(&db.data[pos])) } // pageInBuffer retrieves a page reference from a given byte array based on the current page size. -func (db *DB) pageInBuffer(b []byte, id pgid) *page { - return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)])) +func (db *DB) pageInBuffer(b []byte, id common.Pgid) *common.Page { + return (*common.Page)(unsafe.Pointer(&b[id*common.Pgid(db.pageSize)])) } // meta retrieves the current meta page reference. 
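The `Begin` and `Sync` hunks above add logging around the existing transaction lifecycle; the manual path that `db.Update` wraps is unchanged. A sketch of that path, with an explicit `Sync` for databases opened with `NoSync` — bucket and key names are illustrative:

```go
package main

import (
	bolt "go.etcd.io/bbolt"
)

// writeWithManualTx shows the manual Begin/Commit path that db.Update wraps.
// Using NoSync plus an explicit Sync is only a sketch of the pattern the
// Sync documentation above describes; the defaults are safer for most users.
func writeWithManualTx(db *bolt.DB) error {
	tx, err := db.Begin(true) // writable transaction
	if err != nil {
		return err
	}
	defer tx.Rollback() // ignored (ErrTxClosed) once Commit has succeeded

	b, err := tx.CreateBucketIfNotExists([]byte("config"))
	if err != nil {
		return err
	}
	if err := b.Put([]byte("version"), []byte("1")); err != nil {
		return err
	}
	if err := tx.Commit(); err != nil {
		return err
	}

	// If the DB was opened with NoSync, force the file to disk explicitly.
	if db.NoSync {
		return db.Sync()
	}
	return nil
}
```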
-func (db *DB) meta() *meta { +func (db *DB) meta() *common.Meta { // We have to return the meta with the highest txid which doesn't fail // validation. Otherwise, we can cause errors when in fact the database is // in a consistent state. metaA is the one with the higher txid. metaA := db.meta0 metaB := db.meta1 - if db.meta1.txid > db.meta0.txid { + if db.meta1.Txid() > db.meta0.Txid() { metaA = db.meta1 metaB = db.meta0 } // Use higher meta page if valid. Otherwise, fallback to previous, if valid. - if err := metaA.validate(); err == nil { + if err := metaA.Validate(); err == nil { return metaA - } else if err := metaB.validate(); err == nil { + } else if err := metaB.Validate(); err == nil { return metaB } @@ -1109,7 +1144,7 @@ func (db *DB) meta() *meta { } // allocate returns a contiguous block of memory starting at a given page. -func (db *DB) allocate(txid txid, count int) (*page, error) { +func (db *DB) allocate(txid common.Txid, count int) (*common.Page, error) { // Allocate a temporary buffer for the page. var buf []byte if count == 1 { @@ -1117,17 +1152,18 @@ func (db *DB) allocate(txid txid, count int) (*page, error) { } else { buf = make([]byte, count*db.pageSize) } - p := (*page)(unsafe.Pointer(&buf[0])) - p.overflow = uint32(count - 1) + p := (*common.Page)(unsafe.Pointer(&buf[0])) + p.SetOverflow(uint32(count - 1)) // Use pages from the freelist if they are available. - if p.id = db.freelist.allocate(txid, count); p.id != 0 { + p.SetId(db.freelist.Allocate(txid, count)) + if p.Id() != 0 { return p, nil } // Resize mmap() if we're at the end. - p.id = db.rwtx.meta.pgid - var minsz = int((p.id+pgid(count))+1) * db.pageSize + p.SetId(db.rwtx.meta.Pgid()) + var minsz = int((p.Id()+common.Pgid(count))+1) * db.pageSize if minsz >= db.datasz { if err := db.mmap(minsz); err != nil { return nil, fmt.Errorf("mmap allocate error: %s", err) @@ -1135,7 +1171,8 @@ func (db *DB) allocate(txid txid, count int) (*page, error) { } // Move the page id high water mark. - db.rwtx.meta.pgid += pgid(count) + curPgid := db.rwtx.meta.Pgid() + db.rwtx.meta.SetPgid(curPgid + common.Pgid(count)) return p, nil } @@ -1143,7 +1180,13 @@ func (db *DB) allocate(txid txid, count int) (*page, error) { // grow grows the size of the database to the given sz. func (db *DB) grow(sz int) error { // Ignore if the new size is less than available file size. 
- if sz <= db.filesz { + lg := db.Logger() + fileSize, err := db.fileSize() + if err != nil { + lg.Errorf("getting file size failed: %w", err) + return err + } + if sz <= fileSize { return nil } @@ -1162,21 +1205,22 @@ func (db *DB) grow(sz int) error { // gofail: var resizeFileError string // return errors.New(resizeFileError) if err := db.file.Truncate(int64(sz)); err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] truncating file failed, size: %d, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, sz, db.datasz, err) return fmt.Errorf("file resize error: %s", err) } } if err := db.file.Sync(); err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] syncing file failed, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, db.datasz, err) return fmt.Errorf("file sync error: %s", err) } if db.Mlock { // unlock old file and lock new one - if err := db.mrelock(db.filesz, sz); err != nil { + if err := db.mrelock(fileSize, sz); err != nil { return fmt.Errorf("mlock/munlock error: %s", err) } } } - db.filesz = sz return nil } @@ -1184,7 +1228,7 @@ func (db *DB) IsReadOnly() bool { return db.readOnly } -func (db *DB) freepages() []pgid { +func (db *DB) freepages() []common.Pgid { tx, err := db.beginTx() defer func() { err = tx.Rollback() @@ -1196,21 +1240,21 @@ func (db *DB) freepages() []pgid { panic("freepages: failed to open read only tx") } - reachable := make(map[pgid]*page) - nofreed := make(map[pgid]bool) + reachable := make(map[common.Pgid]*common.Page) + nofreed := make(map[common.Pgid]bool) ech := make(chan error) go func() { for e := range ech { panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e)) } }() - tx.checkBucket(&tx.root, reachable, nofreed, HexKVStringer(), ech) + tx.recursivelyCheckBucket(&tx.root, reachable, nofreed, HexKVStringer(), ech) close(ech) // TODO: If check bucket reported any corruptions (ech) we shouldn't proceed to freeing the pages. - var fids []pgid - for i := pgid(2); i < db.meta().pgid; i++ { + var fids []common.Pgid + for i := common.Pgid(2); i < db.meta().Pgid(); i++ { if _, ok := reachable[i]; !ok { fids = append(fids, i) } @@ -1218,11 +1262,17 @@ func (db *DB) freepages() []pgid { return fids } +func newFreelist(freelistType FreelistType) fl.Interface { + if freelistType == FreelistMapType { + return fl.NewHashMapFreelist() + } + return fl.NewArrayFreelist() +} + // Options represents the options that can be set when opening a database. type Options struct { // Timeout is the amount of time to wait to obtain a file lock. - // When set to zero it will wait indefinitely. This option is only - // available on Darwin and Linux. + // When set to zero it will wait indefinitely. Timeout time.Duration // Sets the DB.NoGrowSync flag before memory mapping the file. @@ -1277,6 +1327,19 @@ type Options struct { // It prevents potential page faults, however // used memory can't be reclaimed. (UNIX only) Mlock bool + + // Logger is the logger used for bbolt. + Logger Logger +} + +func (o *Options) String() string { + if o == nil { + return "{}" + } + + return fmt.Sprintf("{Timeout: %s, NoGrowSync: %t, NoFreelistSync: %t, PreLoadFreelist: %t, FreelistType: %s, ReadOnly: %t, MmapFlags: %x, InitialMmapSize: %d, PageSize: %d, NoSync: %t, OpenFile: %p, Mlock: %t, Logger: %p}", + o.Timeout, o.NoGrowSync, o.NoFreelistSync, o.PreLoadFreelist, o.FreelistType, o.ReadOnly, o.MmapFlags, o.InitialMmapSize, o.PageSize, o.NoSync, o.OpenFile, o.Mlock, o.Logger) + } // DefaultOptions represent the options used if nil options are passed into Open(). 
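The `Options` struct above gains a `Logger` field and a `String` method; the pre-existing fields keep their meaning (with `Timeout` no longer documented as Darwin/Linux-only). A minimal sketch of opening a database with a few of these options — values are illustrative, and leaving `Logger` nil keeps the internal discard logger:

```go
package main

import (
	"log"
	"time"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// All values here are illustrative. Options now also carries a Logger
	// field (see the hunk above); leaving it nil keeps the discard logger.
	opts := &bolt.Options{
		Timeout:      time.Second,          // fail fast instead of blocking on the file lock
		FreelistType: bolt.FreelistMapType, // hashmap freelist backend
	}
	db, err := bolt.Open("app.db", 0600, opts)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```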
@@ -1327,65 +1390,3 @@ type Info struct { Data uintptr PageSize int } - -type meta struct { - magic uint32 - version uint32 - pageSize uint32 - flags uint32 - root bucket - freelist pgid - pgid pgid - txid txid - checksum uint64 -} - -// validate checks the marker bytes and version of the meta page to ensure it matches this binary. -func (m *meta) validate() error { - if m.magic != magic { - return ErrInvalid - } else if m.version != version { - return ErrVersionMismatch - } else if m.checksum != m.sum64() { - return ErrChecksum - } - return nil -} - -// copy copies one meta object to another. -func (m *meta) copy(dest *meta) { - *dest = *m -} - -// write writes the meta onto a page. -func (m *meta) write(p *page) { - if m.root.root >= m.pgid { - panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) - } else if m.freelist >= m.pgid && m.freelist != pgidNoFreelist { - // TODO: reject pgidNoFreeList if !NoFreelistSync - panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) - } - - // Page id is either going to be 0 or 1 which we can determine by the transaction ID. - p.id = pgid(m.txid % 2) - p.flags |= metaPageFlag - - // Calculate the checksum. - m.checksum = m.sum64() - - m.copy(p.meta()) -} - -// generates the checksum for the meta. -func (m *meta) sum64() uint64 { - var h = fnv.New64a() - _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:]) - return h.Sum64() -} - -// _assert will panic with a given formatted message if the given condition is false. -func _assert(condition bool, msg string, v ...interface{}) { - if !condition { - panic(fmt.Sprintf("assertion failed: "+msg, v...)) - } -} diff --git a/vendor/go.etcd.io/bbolt/errors.go b/vendor/go.etcd.io/bbolt/errors.go index f2c3b20ed8b7e..02958c86f5df8 100644 --- a/vendor/go.etcd.io/bbolt/errors.go +++ b/vendor/go.etcd.io/bbolt/errors.go @@ -1,78 +1,108 @@ package bbolt -import "errors" +import "go.etcd.io/bbolt/errors" // These errors can be returned when opening or calling methods on a DB. var ( // ErrDatabaseNotOpen is returned when a DB instance is accessed before it // is opened or after it is closed. - ErrDatabaseNotOpen = errors.New("database not open") - - // ErrDatabaseOpen is returned when opening a database that is - // already open. - ErrDatabaseOpen = errors.New("database already open") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrDatabaseNotOpen = errors.ErrDatabaseNotOpen // ErrInvalid is returned when both meta pages on a database are invalid. // This typically occurs when a file is not a bolt database. - ErrInvalid = errors.New("invalid database") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrInvalid = errors.ErrInvalid // ErrInvalidMapping is returned when the database file fails to get mapped. - ErrInvalidMapping = errors.New("database isn't correctly mapped") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrInvalidMapping = errors.ErrInvalidMapping // ErrVersionMismatch is returned when the data file was created with a // different version of Bolt. - ErrVersionMismatch = errors.New("version mismatch") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrVersionMismatch = errors.ErrVersionMismatch - // ErrChecksum is returned when either meta page checksum does not match. 
- ErrChecksum = errors.New("checksum error") + // ErrChecksum is returned when a checksum mismatch occurs on either of the two meta pages. + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrChecksum = errors.ErrChecksum // ErrTimeout is returned when a database cannot obtain an exclusive lock // on the data file after the timeout passed to Open(). - ErrTimeout = errors.New("timeout") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrTimeout = errors.ErrTimeout ) // These errors can occur when beginning or committing a Tx. var ( // ErrTxNotWritable is returned when performing a write operation on a // read-only transaction. - ErrTxNotWritable = errors.New("tx not writable") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrTxNotWritable = errors.ErrTxNotWritable // ErrTxClosed is returned when committing or rolling back a transaction // that has already been committed or rolled back. - ErrTxClosed = errors.New("tx closed") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrTxClosed = errors.ErrTxClosed // ErrDatabaseReadOnly is returned when a mutating transaction is started on a // read-only database. - ErrDatabaseReadOnly = errors.New("database is in read-only mode") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrDatabaseReadOnly = errors.ErrDatabaseReadOnly // ErrFreePagesNotLoaded is returned when a readonly transaction without // preloading the free pages is trying to access the free pages. - ErrFreePagesNotLoaded = errors.New("free pages are not pre-loaded") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrFreePagesNotLoaded = errors.ErrFreePagesNotLoaded ) // These errors can occur when putting or deleting a value or a bucket. var ( // ErrBucketNotFound is returned when trying to access a bucket that has // not been created yet. - ErrBucketNotFound = errors.New("bucket not found") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrBucketNotFound = errors.ErrBucketNotFound // ErrBucketExists is returned when creating a bucket that already exists. - ErrBucketExists = errors.New("bucket already exists") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrBucketExists = errors.ErrBucketExists // ErrBucketNameRequired is returned when creating a bucket with a blank name. - ErrBucketNameRequired = errors.New("bucket name required") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrBucketNameRequired = errors.ErrBucketNameRequired // ErrKeyRequired is returned when inserting a zero-length key. - ErrKeyRequired = errors.New("key required") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrKeyRequired = errors.ErrKeyRequired // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. - ErrKeyTooLarge = errors.New("key too large") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrKeyTooLarge = errors.ErrKeyTooLarge // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. - ErrValueTooLarge = errors.New("value too large") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. 
+ ErrValueTooLarge = errors.ErrValueTooLarge // ErrIncompatibleValue is returned when trying create or delete a bucket // on an existing non-bucket key or when trying to create or delete a // non-bucket key on an existing bucket key. - ErrIncompatibleValue = errors.New("incompatible value") + // + // Deprecated: Use the error variables defined in the bbolt/errors package. + ErrIncompatibleValue = errors.ErrIncompatibleValue ) diff --git a/vendor/go.etcd.io/bbolt/errors/errors.go b/vendor/go.etcd.io/bbolt/errors/errors.go new file mode 100644 index 0000000000000..c115289e56c23 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/errors/errors.go @@ -0,0 +1,84 @@ +// Package errors defines the error variables that may be returned +// during bbolt operations. +package errors + +import "errors" + +// These errors can be returned when opening or calling methods on a DB. +var ( + // ErrDatabaseNotOpen is returned when a DB instance is accessed before it + // is opened or after it is closed. + ErrDatabaseNotOpen = errors.New("database not open") + + // ErrInvalid is returned when both meta pages on a database are invalid. + // This typically occurs when a file is not a bolt database. + ErrInvalid = errors.New("invalid database") + + // ErrInvalidMapping is returned when the database file fails to get mapped. + ErrInvalidMapping = errors.New("database isn't correctly mapped") + + // ErrVersionMismatch is returned when the data file was created with a + // different version of Bolt. + ErrVersionMismatch = errors.New("version mismatch") + + // ErrChecksum is returned when a checksum mismatch occurs on either of the two meta pages. + ErrChecksum = errors.New("checksum error") + + // ErrTimeout is returned when a database cannot obtain an exclusive lock + // on the data file after the timeout passed to Open(). + ErrTimeout = errors.New("timeout") +) + +// These errors can occur when beginning or committing a Tx. +var ( + // ErrTxNotWritable is returned when performing a write operation on a + // read-only transaction. + ErrTxNotWritable = errors.New("tx not writable") + + // ErrTxClosed is returned when committing or rolling back a transaction + // that has already been committed or rolled back. + ErrTxClosed = errors.New("tx closed") + + // ErrDatabaseReadOnly is returned when a mutating transaction is started on a + // read-only database. + ErrDatabaseReadOnly = errors.New("database is in read-only mode") + + // ErrFreePagesNotLoaded is returned when a readonly transaction without + // preloading the free pages is trying to access the free pages. + ErrFreePagesNotLoaded = errors.New("free pages are not pre-loaded") +) + +// These errors can occur when putting or deleting a value or a bucket. +var ( + // ErrBucketNotFound is returned when trying to access a bucket that has + // not been created yet. + ErrBucketNotFound = errors.New("bucket not found") + + // ErrBucketExists is returned when creating a bucket that already exists. + ErrBucketExists = errors.New("bucket already exists") + + // ErrBucketNameRequired is returned when creating a bucket with a blank name. + ErrBucketNameRequired = errors.New("bucket name required") + + // ErrKeyRequired is returned when inserting a zero-length key. + ErrKeyRequired = errors.New("key required") + + // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. + ErrKeyTooLarge = errors.New("key too large") + + // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. 
+ ErrValueTooLarge = errors.New("value too large") + + // ErrIncompatibleValue is returned when trying to create or delete a bucket + // on an existing non-bucket key or when trying to create or delete a + // non-bucket key on an existing bucket key. + ErrIncompatibleValue = errors.New("incompatible value") + + // ErrSameBuckets is returned when trying to move a sub-bucket between + // source and target buckets, while source and target buckets are the same. + ErrSameBuckets = errors.New("the source and target are the same bucket") + + // ErrDifferentDB is returned when trying to move a sub-bucket between + // source and target buckets, while source and target buckets are in different database files. + ErrDifferentDB = errors.New("the source and target buckets are in different database files") +) diff --git a/vendor/go.etcd.io/bbolt/freelist.go b/vendor/go.etcd.io/bbolt/freelist.go deleted file mode 100644 index dffc7bc749b52..0000000000000 --- a/vendor/go.etcd.io/bbolt/freelist.go +++ /dev/null @@ -1,410 +0,0 @@ -package bbolt - -import ( - "fmt" - "sort" - "unsafe" -) - -// txPending holds a list of pgids and corresponding allocation txns -// that are pending to be freed. -type txPending struct { - ids []pgid - alloctx []txid // txids allocating the ids - lastReleaseBegin txid // beginning txid of last matching releaseRange -} - -// pidSet holds the set of starting pgids which have the same span size -type pidSet map[pgid]struct{} - -// freelist represents a list of all pages that are available for allocation. -// It also tracks pages that have been freed but are still in use by open transactions. -type freelist struct { - freelistType FreelistType // freelist type - ids []pgid // all free and available free page ids. - allocs map[pgid]txid // mapping of txid that allocated a pgid. - pending map[txid]*txPending // mapping of soon-to-be free page ids by tx. - cache map[pgid]struct{} // fast lookup of all free and pending page ids. - freemaps map[uint64]pidSet // key is the size of continuous pages(span), value is a set which contains the starting pgids of same size - forwardMap map[pgid]uint64 // key is start pgid, value is its span size - backwardMap map[pgid]uint64 // key is end pgid, value is its span size - allocate func(txid txid, n int) pgid // the freelist allocate func - free_count func() int // the function which gives you free page number - mergeSpans func(ids pgids) // the mergeSpan func - getFreePageIDs func() []pgid // get free pgids func - readIDs func(pgids []pgid) // readIDs func reads list of pages and init the freelist -} - -// newFreelist returns an empty, initialized freelist. -func newFreelist(freelistType FreelistType) *freelist { - f := &freelist{ - freelistType: freelistType, - allocs: make(map[pgid]txid), - pending: make(map[txid]*txPending), - cache: make(map[pgid]struct{}), - freemaps: make(map[uint64]pidSet), - forwardMap: make(map[pgid]uint64), - backwardMap: make(map[pgid]uint64), - } - - if freelistType == FreelistMapType { - f.allocate = f.hashmapAllocate - f.free_count = f.hashmapFreeCount - f.mergeSpans = f.hashmapMergeSpans - f.getFreePageIDs = f.hashmapGetFreePageIDs - f.readIDs = f.hashmapReadIDs - } else { - f.allocate = f.arrayAllocate - f.free_count = f.arrayFreeCount - f.mergeSpans = f.arrayMergeSpans - f.getFreePageIDs = f.arrayGetFreePageIDs - f.readIDs = f.arrayReadIDs - } - - return f -} - -// size returns the size of the page after serialization. 
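Before the freelist removal that continues below, the errors.go migration above deserves a usage note: the old top-level sentinels are kept as deprecated aliases for the values in the new go.etcd.io/bbolt/errors package, so errors.Is matches either spelling. A small sketch, with a made-up describe helper:

package main

import (
    "errors"
    "fmt"

    bolt "go.etcd.io/bbolt"
    berrors "go.etcd.io/bbolt/errors"
)

// describe is a hypothetical helper; both names below refer to the same
// underlying error values, so either comparison succeeds.
func describe(err error) string {
    switch {
    case errors.Is(err, berrors.ErrBucketNotFound):
        return "bucket not found"
    case errors.Is(err, bolt.ErrTxClosed): // deprecated alias, still usable
        return "transaction already closed"
    default:
        return err.Error()
    }
}

func main() {
    fmt.Println(describe(berrors.ErrBucketNotFound))
    fmt.Println(describe(bolt.ErrTxClosed))
}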
-func (f *freelist) size() int { - n := f.count() - if n >= 0xFFFF { - // The first element will be used to store the count. See freelist.write. - n++ - } - return int(pageHeaderSize) + (int(unsafe.Sizeof(pgid(0))) * n) -} - -// count returns count of pages on the freelist -func (f *freelist) count() int { - return f.free_count() + f.pending_count() -} - -// arrayFreeCount returns count of free pages(array version) -func (f *freelist) arrayFreeCount() int { - return len(f.ids) -} - -// pending_count returns count of pending pages -func (f *freelist) pending_count() int { - var count int - for _, txp := range f.pending { - count += len(txp.ids) - } - return count -} - -// copyall copies a list of all free ids and all pending ids in one sorted list. -// f.count returns the minimum length required for dst. -func (f *freelist) copyall(dst []pgid) { - m := make(pgids, 0, f.pending_count()) - for _, txp := range f.pending { - m = append(m, txp.ids...) - } - sort.Sort(m) - mergepgids(dst, f.getFreePageIDs(), m) -} - -// arrayAllocate returns the starting page id of a contiguous list of pages of a given size. -// If a contiguous block cannot be found then 0 is returned. -func (f *freelist) arrayAllocate(txid txid, n int) pgid { - if len(f.ids) == 0 { - return 0 - } - - var initial, previd pgid - for i, id := range f.ids { - if id <= 1 { - panic(fmt.Sprintf("invalid page allocation: %d", id)) - } - - // Reset initial page if this is not contiguous. - if previd == 0 || id-previd != 1 { - initial = id - } - - // If we found a contiguous block then remove it and return it. - if (id-initial)+1 == pgid(n) { - // If we're allocating off the beginning then take the fast path - // and just adjust the existing slice. This will use extra memory - // temporarily but the append() in free() will realloc the slice - // as is necessary. - if (i + 1) == n { - f.ids = f.ids[i+1:] - } else { - copy(f.ids[i-n+1:], f.ids[i+1:]) - f.ids = f.ids[:len(f.ids)-n] - } - - // Remove from the free cache. - for i := pgid(0); i < pgid(n); i++ { - delete(f.cache, initial+i) - } - f.allocs[initial] = txid - return initial - } - - previd = id - } - return 0 -} - -// free releases a page and its overflow for a given transaction id. -// If the page is already free then a panic will occur. -func (f *freelist) free(txid txid, p *page) { - if p.id <= 1 { - panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id)) - } - - // Free page and all its overflow pages. - txp := f.pending[txid] - if txp == nil { - txp = &txPending{} - f.pending[txid] = txp - } - allocTxid, ok := f.allocs[p.id] - if ok { - delete(f.allocs, p.id) - } else if (p.flags & freelistPageFlag) != 0 { - // Freelist is always allocated by prior tx. - allocTxid = txid - 1 - } - - for id := p.id; id <= p.id+pgid(p.overflow); id++ { - // Verify that page is not already free. - if _, ok := f.cache[id]; ok { - panic(fmt.Sprintf("page %d already freed", id)) - } - // Add to the freelist and cache. - txp.ids = append(txp.ids, id) - txp.alloctx = append(txp.alloctx, allocTxid) - f.cache[id] = struct{}{} - } -} - -// release moves all page ids for a transaction id (or older) to the freelist. -func (f *freelist) release(txid txid) { - m := make(pgids, 0) - for tid, txp := range f.pending { - if tid <= txid { - // Move transaction's pending pages to the available freelist. - // Don't remove from the cache since the page is still free. - m = append(m, txp.ids...) 
- delete(f.pending, tid) - } - } - f.mergeSpans(m) -} - -// releaseRange moves pending pages allocated within an extent [begin,end] to the free list. -func (f *freelist) releaseRange(begin, end txid) { - if begin > end { - return - } - var m pgids - for tid, txp := range f.pending { - if tid < begin || tid > end { - continue - } - // Don't recompute freed pages if ranges haven't updated. - if txp.lastReleaseBegin == begin { - continue - } - for i := 0; i < len(txp.ids); i++ { - if atx := txp.alloctx[i]; atx < begin || atx > end { - continue - } - m = append(m, txp.ids[i]) - txp.ids[i] = txp.ids[len(txp.ids)-1] - txp.ids = txp.ids[:len(txp.ids)-1] - txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1] - txp.alloctx = txp.alloctx[:len(txp.alloctx)-1] - i-- - } - txp.lastReleaseBegin = begin - if len(txp.ids) == 0 { - delete(f.pending, tid) - } - } - f.mergeSpans(m) -} - -// rollback removes the pages from a given pending tx. -func (f *freelist) rollback(txid txid) { - // Remove page ids from cache. - txp := f.pending[txid] - if txp == nil { - return - } - var m pgids - for i, pgid := range txp.ids { - delete(f.cache, pgid) - tx := txp.alloctx[i] - if tx == 0 { - continue - } - if tx != txid { - // Pending free aborted; restore page back to alloc list. - f.allocs[pgid] = tx - } else { - // Freed page was allocated by this txn; OK to throw away. - m = append(m, pgid) - } - } - // Remove pages from pending list and mark as free if allocated by txid. - delete(f.pending, txid) - - // Remove pgids which are allocated by this txid - for pgid, tid := range f.allocs { - if tid == txid { - delete(f.allocs, pgid) - } - } - - f.mergeSpans(m) -} - -// freed returns whether a given page is in the free list. -func (f *freelist) freed(pgId pgid) bool { - _, ok := f.cache[pgId] - return ok -} - -// read initializes the freelist from a freelist page. -func (f *freelist) read(p *page) { - if (p.flags & freelistPageFlag) == 0 { - panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.id, p.typ())) - } - // If the page.count is at the max uint16 value (64k) then it's considered - // an overflow and the size of the freelist is stored as the first element. - var idx, count = 0, int(p.count) - if count == 0xFFFF { - idx = 1 - c := *(*pgid)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) - count = int(c) - if count < 0 { - panic(fmt.Sprintf("leading element count %d overflows int", c)) - } - } - - // Copy the list of page ids from the freelist. - if count == 0 { - f.ids = nil - } else { - data := unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), unsafe.Sizeof(pgid(0)), idx) - ids := unsafe.Slice((*pgid)(data), count) - - // copy the ids, so we don't modify on the freelist page directly - idsCopy := make([]pgid, count) - copy(idsCopy, ids) - // Make sure they're sorted. - sort.Sort(pgids(idsCopy)) - - f.readIDs(idsCopy) - } -} - -// arrayReadIDs initializes the freelist from a given list of ids. -func (f *freelist) arrayReadIDs(ids []pgid) { - f.ids = ids - f.reindex() -} - -func (f *freelist) arrayGetFreePageIDs() []pgid { - return f.ids -} - -// write writes the page ids onto a freelist page. All free and pending ids are -// saved to disk since in the event of a program crash, all pending ids will -// become free. -func (f *freelist) write(p *page) error { - // Combine the old free pgids and pgids waiting on an open transaction. - - // Update the header flag. 
- p.flags |= freelistPageFlag - - // The page.count can only hold up to 64k elements so if we overflow that - // number then we handle it by putting the size in the first element. - l := f.count() - if l == 0 { - p.count = uint16(l) - } else if l < 0xFFFF { - p.count = uint16(l) - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - ids := unsafe.Slice((*pgid)(data), l) - f.copyall(ids) - } else { - p.count = 0xFFFF - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - ids := unsafe.Slice((*pgid)(data), l+1) - ids[0] = pgid(l) - f.copyall(ids[1:]) - } - - return nil -} - -// reload reads the freelist from a page and filters out pending items. -func (f *freelist) reload(p *page) { - f.read(p) - - // Build a cache of only pending pages. - pcache := make(map[pgid]bool) - for _, txp := range f.pending { - for _, pendingID := range txp.ids { - pcache[pendingID] = true - } - } - - // Check each page in the freelist and build a new available freelist - // with any pages not in the pending lists. - var a []pgid - for _, id := range f.getFreePageIDs() { - if !pcache[id] { - a = append(a, id) - } - } - - f.readIDs(a) -} - -// noSyncReload reads the freelist from pgids and filters out pending items. -func (f *freelist) noSyncReload(pgids []pgid) { - // Build a cache of only pending pages. - pcache := make(map[pgid]bool) - for _, txp := range f.pending { - for _, pendingID := range txp.ids { - pcache[pendingID] = true - } - } - - // Check each page in the freelist and build a new available freelist - // with any pages not in the pending lists. - var a []pgid - for _, id := range pgids { - if !pcache[id] { - a = append(a, id) - } - } - - f.readIDs(a) -} - -// reindex rebuilds the free cache based on available and pending free lists. -func (f *freelist) reindex() { - ids := f.getFreePageIDs() - f.cache = make(map[pgid]struct{}, len(ids)) - for _, id := range ids { - f.cache[id] = struct{}{} - } - for _, txp := range f.pending { - for _, pendingID := range txp.ids { - f.cache[pendingID] = struct{}{} - } - } -} - -// arrayMergeSpans try to merge list of pages(represented by pgids) with existing spans but using array -func (f *freelist) arrayMergeSpans(ids pgids) { - sort.Sort(ids) - f.ids = pgids(f.ids).merge(ids) -} diff --git a/vendor/go.etcd.io/bbolt/freelist_hmap.go b/vendor/go.etcd.io/bbolt/freelist_hmap.go deleted file mode 100644 index dbd67a1e7361a..0000000000000 --- a/vendor/go.etcd.io/bbolt/freelist_hmap.go +++ /dev/null @@ -1,178 +0,0 @@ -package bbolt - -import "sort" - -// hashmapFreeCount returns count of free pages(hashmap version) -func (f *freelist) hashmapFreeCount() int { - // use the forwardMap to get the total count - count := 0 - for _, size := range f.forwardMap { - count += int(size) - } - return count -} - -// hashmapAllocate serves the same purpose as arrayAllocate, but use hashmap as backend -func (f *freelist) hashmapAllocate(txid txid, n int) pgid { - if n == 0 { - return 0 - } - - // if we have a exact size match just return short path - if bm, ok := f.freemaps[uint64(n)]; ok { - for pid := range bm { - // remove the span - f.delSpan(pid, uint64(n)) - - f.allocs[pid] = txid - - for i := pgid(0); i < pgid(n); i++ { - delete(f.cache, pid+i) - } - return pid - } - } - - // lookup the map to find larger span - for size, bm := range f.freemaps { - if size < uint64(n) { - continue - } - - for pid := range bm { - // remove the initial - f.delSpan(pid, size) - - f.allocs[pid] = txid - - remain := size - uint64(n) - - // add remain span - f.addSpan(pid+pgid(n), remain) - - 
for i := pgid(0); i < pgid(n); i++ { - delete(f.cache, pid+i) - } - return pid - } - } - - return 0 -} - -// hashmapReadIDs reads pgids as input an initial the freelist(hashmap version) -func (f *freelist) hashmapReadIDs(pgids []pgid) { - f.init(pgids) - - // Rebuild the page cache. - f.reindex() -} - -// hashmapGetFreePageIDs returns the sorted free page ids -func (f *freelist) hashmapGetFreePageIDs() []pgid { - count := f.free_count() - if count == 0 { - return nil - } - - m := make([]pgid, 0, count) - for start, size := range f.forwardMap { - for i := 0; i < int(size); i++ { - m = append(m, start+pgid(i)) - } - } - sort.Sort(pgids(m)) - - return m -} - -// hashmapMergeSpans try to merge list of pages(represented by pgids) with existing spans -func (f *freelist) hashmapMergeSpans(ids pgids) { - for _, id := range ids { - // try to see if we can merge and update - f.mergeWithExistingSpan(id) - } -} - -// mergeWithExistingSpan merges pid to the existing free spans, try to merge it backward and forward -func (f *freelist) mergeWithExistingSpan(pid pgid) { - prev := pid - 1 - next := pid + 1 - - preSize, mergeWithPrev := f.backwardMap[prev] - nextSize, mergeWithNext := f.forwardMap[next] - newStart := pid - newSize := uint64(1) - - if mergeWithPrev { - //merge with previous span - start := prev + 1 - pgid(preSize) - f.delSpan(start, preSize) - - newStart -= pgid(preSize) - newSize += preSize - } - - if mergeWithNext { - // merge with next span - f.delSpan(next, nextSize) - newSize += nextSize - } - - f.addSpan(newStart, newSize) -} - -func (f *freelist) addSpan(start pgid, size uint64) { - f.backwardMap[start-1+pgid(size)] = size - f.forwardMap[start] = size - if _, ok := f.freemaps[size]; !ok { - f.freemaps[size] = make(map[pgid]struct{}) - } - - f.freemaps[size][start] = struct{}{} -} - -func (f *freelist) delSpan(start pgid, size uint64) { - delete(f.forwardMap, start) - delete(f.backwardMap, start+pgid(size-1)) - delete(f.freemaps[size], start) - if len(f.freemaps[size]) == 0 { - delete(f.freemaps, size) - } -} - -// initial from pgids using when use hashmap version -// pgids must be sorted -func (f *freelist) init(pgids []pgid) { - if len(pgids) == 0 { - return - } - - size := uint64(1) - start := pgids[0] - - if !sort.SliceIsSorted([]pgid(pgids), func(i, j int) bool { return pgids[i] < pgids[j] }) { - panic("pgids not sorted") - } - - f.freemaps = make(map[uint64]pidSet) - f.forwardMap = make(map[pgid]uint64) - f.backwardMap = make(map[pgid]uint64) - - for i := 1; i < len(pgids); i++ { - // continuous page - if pgids[i] == pgids[i-1]+1 { - size++ - } else { - f.addSpan(start, size) - - size = 1 - start = pgids[i] - } - } - - // init the tail - if size != 0 && start != 0 { - f.addSpan(start, size) - } -} diff --git a/vendor/go.etcd.io/bbolt/internal/common/bucket.go b/vendor/go.etcd.io/bbolt/internal/common/bucket.go new file mode 100644 index 0000000000000..2b4ab1453a1e0 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/bucket.go @@ -0,0 +1,54 @@ +package common + +import ( + "fmt" + "unsafe" +) + +const BucketHeaderSize = int(unsafe.Sizeof(InBucket{})) + +// InBucket represents the on-file representation of a bucket. +// This is stored as the "value" of a bucket key. If the bucket is small enough, +// then its root page can be stored inline in the "value", after the bucket +// header. In the case of inline buckets, the "root" will be 0. 
+type InBucket struct { + root Pgid // page id of the bucket's root-level page + sequence uint64 // monotonically incrementing, used by NextSequence() +} + +func NewInBucket(root Pgid, seq uint64) InBucket { + return InBucket{ + root: root, + sequence: seq, + } +} + +func (b *InBucket) RootPage() Pgid { + return b.root +} + +func (b *InBucket) SetRootPage(id Pgid) { + b.root = id +} + +// InSequence returns the sequence. The reason why not naming it `Sequence` +// is to avoid duplicated name as `(*Bucket) Sequence()` +func (b *InBucket) InSequence() uint64 { + return b.sequence +} + +func (b *InBucket) SetInSequence(v uint64) { + b.sequence = v +} + +func (b *InBucket) IncSequence() { + b.sequence++ +} + +func (b *InBucket) InlinePage(v []byte) *Page { + return (*Page)(unsafe.Pointer(&v[BucketHeaderSize])) +} + +func (b *InBucket) String() string { + return fmt.Sprintf("", b.root, b.sequence) +} diff --git a/vendor/go.etcd.io/bbolt/internal/common/inode.go b/vendor/go.etcd.io/bbolt/internal/common/inode.go new file mode 100644 index 0000000000000..080b9af789dae --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/inode.go @@ -0,0 +1,115 @@ +package common + +import "unsafe" + +// Inode represents an internal node inside of a node. +// It can be used to point to elements in a page or point +// to an element which hasn't been added to a page yet. +type Inode struct { + flags uint32 + pgid Pgid + key []byte + value []byte +} + +type Inodes []Inode + +func (in *Inode) Flags() uint32 { + return in.flags +} + +func (in *Inode) SetFlags(flags uint32) { + in.flags = flags +} + +func (in *Inode) Pgid() Pgid { + return in.pgid +} + +func (in *Inode) SetPgid(id Pgid) { + in.pgid = id +} + +func (in *Inode) Key() []byte { + return in.key +} + +func (in *Inode) SetKey(key []byte) { + in.key = key +} + +func (in *Inode) Value() []byte { + return in.value +} + +func (in *Inode) SetValue(value []byte) { + in.value = value +} + +func ReadInodeFromPage(p *Page) Inodes { + inodes := make(Inodes, int(p.Count())) + isLeaf := p.IsLeafPage() + for i := 0; i < int(p.Count()); i++ { + inode := &inodes[i] + if isLeaf { + elem := p.LeafPageElement(uint16(i)) + inode.SetFlags(elem.Flags()) + inode.SetKey(elem.Key()) + inode.SetValue(elem.Value()) + } else { + elem := p.BranchPageElement(uint16(i)) + inode.SetPgid(elem.Pgid()) + inode.SetKey(elem.Key()) + } + Assert(len(inode.Key()) > 0, "read: zero-length inode key") + } + + return inodes +} + +func WriteInodeToPage(inodes Inodes, p *Page) uint32 { + // Loop over each item and write it to the page. + // off tracks the offset into p of the start of the next data. + off := unsafe.Sizeof(*p) + p.PageElementSize()*uintptr(len(inodes)) + isLeaf := p.IsLeafPage() + for i, item := range inodes { + Assert(len(item.Key()) > 0, "write: zero-length inode key") + + // Create a slice to write into of needed size and advance + // byte pointer for next iteration. + sz := len(item.Key()) + len(item.Value()) + b := UnsafeByteSlice(unsafe.Pointer(p), off, 0, sz) + off += uintptr(sz) + + // Write the page element. 
+ if isLeaf { + elem := p.LeafPageElement(uint16(i)) + elem.SetPos(uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))) + elem.SetFlags(item.Flags()) + elem.SetKsize(uint32(len(item.Key()))) + elem.SetVsize(uint32(len(item.Value()))) + } else { + elem := p.BranchPageElement(uint16(i)) + elem.SetPos(uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))) + elem.SetKsize(uint32(len(item.Key()))) + elem.SetPgid(item.Pgid()) + Assert(elem.Pgid() != p.Id(), "write: circular dependency occurred") + } + + // Write data for the element to the end of the page. + l := copy(b, item.Key()) + copy(b[l:], item.Value()) + } + + return uint32(off) +} + +func UsedSpaceInPage(inodes Inodes, p *Page) uint32 { + off := unsafe.Sizeof(*p) + p.PageElementSize()*uintptr(len(inodes)) + for _, item := range inodes { + sz := len(item.Key()) + len(item.Value()) + off += uintptr(sz) + } + + return uint32(off) +} diff --git a/vendor/go.etcd.io/bbolt/internal/common/meta.go b/vendor/go.etcd.io/bbolt/internal/common/meta.go new file mode 100644 index 0000000000000..055388604af81 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/meta.go @@ -0,0 +1,161 @@ +package common + +import ( + "fmt" + "hash/fnv" + "io" + "unsafe" + + "go.etcd.io/bbolt/errors" +) + +type Meta struct { + magic uint32 + version uint32 + pageSize uint32 + flags uint32 + root InBucket + freelist Pgid + pgid Pgid + txid Txid + checksum uint64 +} + +// Validate checks the marker bytes and version of the meta page to ensure it matches this binary. +func (m *Meta) Validate() error { + if m.magic != Magic { + return errors.ErrInvalid + } else if m.version != Version { + return errors.ErrVersionMismatch + } else if m.checksum != m.Sum64() { + return errors.ErrChecksum + } + return nil +} + +// Copy copies one meta object to another. +func (m *Meta) Copy(dest *Meta) { + *dest = *m +} + +// Write writes the meta onto a page. +func (m *Meta) Write(p *Page) { + if m.root.root >= m.pgid { + panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) + } else if m.freelist >= m.pgid && m.freelist != PgidNoFreelist { + // TODO: reject pgidNoFreeList if !NoFreelistSync + panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) + } + + // Page id is either going to be 0 or 1 which we can determine by the transaction ID. + p.id = Pgid(m.txid % 2) + p.SetFlags(MetaPageFlag) + + // Calculate the checksum. + m.checksum = m.Sum64() + + m.Copy(p.Meta()) +} + +// Sum64 generates the checksum for the meta. 
+func (m *Meta) Sum64() uint64 { + var h = fnv.New64a() + _, _ = h.Write((*[unsafe.Offsetof(Meta{}.checksum)]byte)(unsafe.Pointer(m))[:]) + return h.Sum64() +} + +func (m *Meta) Magic() uint32 { + return m.magic +} + +func (m *Meta) SetMagic(v uint32) { + m.magic = v +} + +func (m *Meta) Version() uint32 { + return m.version +} + +func (m *Meta) SetVersion(v uint32) { + m.version = v +} + +func (m *Meta) PageSize() uint32 { + return m.pageSize +} + +func (m *Meta) SetPageSize(v uint32) { + m.pageSize = v +} + +func (m *Meta) Flags() uint32 { + return m.flags +} + +func (m *Meta) SetFlags(v uint32) { + m.flags = v +} + +func (m *Meta) SetRootBucket(b InBucket) { + m.root = b +} + +func (m *Meta) RootBucket() *InBucket { + return &m.root +} + +func (m *Meta) Freelist() Pgid { + return m.freelist +} + +func (m *Meta) SetFreelist(v Pgid) { + m.freelist = v +} + +func (m *Meta) IsFreelistPersisted() bool { + return m.freelist != PgidNoFreelist +} + +func (m *Meta) Pgid() Pgid { + return m.pgid +} + +func (m *Meta) SetPgid(id Pgid) { + m.pgid = id +} + +func (m *Meta) Txid() Txid { + return m.txid +} + +func (m *Meta) SetTxid(id Txid) { + m.txid = id +} + +func (m *Meta) IncTxid() { + m.txid += 1 +} + +func (m *Meta) DecTxid() { + m.txid -= 1 +} + +func (m *Meta) Checksum() uint64 { + return m.checksum +} + +func (m *Meta) SetChecksum(v uint64) { + m.checksum = v +} + +func (m *Meta) Print(w io.Writer) { + fmt.Fprintf(w, "Version: %d\n", m.version) + fmt.Fprintf(w, "Page Size: %d bytes\n", m.pageSize) + fmt.Fprintf(w, "Flags: %08x\n", m.flags) + fmt.Fprintf(w, "Root: \n", m.root.root) + fmt.Fprintf(w, "Freelist: \n", m.freelist) + fmt.Fprintf(w, "HWM: \n", m.pgid) + fmt.Fprintf(w, "Txn ID: %d\n", m.txid) + fmt.Fprintf(w, "Checksum: %016x\n", m.checksum) + fmt.Fprintf(w, "\n") +} diff --git a/vendor/go.etcd.io/bbolt/internal/common/page.go b/vendor/go.etcd.io/bbolt/internal/common/page.go new file mode 100644 index 0000000000000..ee808967c5731 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/page.go @@ -0,0 +1,391 @@ +package common + +import ( + "fmt" + "os" + "sort" + "unsafe" +) + +const PageHeaderSize = unsafe.Sizeof(Page{}) + +const MinKeysPerPage = 2 + +const BranchPageElementSize = unsafe.Sizeof(branchPageElement{}) +const LeafPageElementSize = unsafe.Sizeof(leafPageElement{}) +const pgidSize = unsafe.Sizeof(Pgid(0)) + +const ( + BranchPageFlag = 0x01 + LeafPageFlag = 0x02 + MetaPageFlag = 0x04 + FreelistPageFlag = 0x10 +) + +const ( + BucketLeafFlag = 0x01 +) + +type Pgid uint64 + +type Page struct { + id Pgid + flags uint16 + count uint16 + overflow uint32 +} + +func NewPage(id Pgid, flags, count uint16, overflow uint32) *Page { + return &Page{ + id: id, + flags: flags, + count: count, + overflow: overflow, + } +} + +// Typ returns a human-readable page type string used for debugging. +func (p *Page) Typ() string { + if p.IsBranchPage() { + return "branch" + } else if p.IsLeafPage() { + return "leaf" + } else if p.IsMetaPage() { + return "meta" + } else if p.IsFreelistPage() { + return "freelist" + } + return fmt.Sprintf("unknown<%02x>", p.flags) +} + +func (p *Page) IsBranchPage() bool { + return p.flags == BranchPageFlag +} + +func (p *Page) IsLeafPage() bool { + return p.flags == LeafPageFlag +} + +func (p *Page) IsMetaPage() bool { + return p.flags == MetaPageFlag +} + +func (p *Page) IsFreelistPage() bool { + return p.flags == FreelistPageFlag +} + +// Meta returns a pointer to the metadata section of the page. 
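Meta.Sum64 above hashes every field that precedes checksum in the struct, using unsafe.Offsetof to bound the byte view, so the stored checksum never feeds into its own computation. A standalone sketch of the same trick on a made-up struct (the field set is illustrative, not the real Meta layout):

package main

import (
    "fmt"
    "hash/fnv"
    "unsafe"
)

// demoMeta only mirrors the idea: the checksum field comes last and covers
// everything stored before it.
type demoMeta struct {
    magic    uint32
    version  uint32
    pageSize uint32
    checksum uint64
}

// sum64 hashes the struct bytes up to, but not including, the checksum field,
// which is the same unsafe.Offsetof trick used by Meta.Sum64.
func (m *demoMeta) sum64() uint64 {
    h := fnv.New64a()
    _, _ = h.Write((*[unsafe.Offsetof(demoMeta{}.checksum)]byte)(unsafe.Pointer(m))[:])
    return h.Sum64()
}

func main() {
    m := &demoMeta{magic: 0xED0CDAED, version: 2, pageSize: 4096}
    m.checksum = m.sum64()
    fmt.Printf("checksum=%016x still valid=%v\n", m.checksum, m.checksum == m.sum64())
}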
+func (p *Page) Meta() *Meta { + return (*Meta)(UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) +} + +func (p *Page) FastCheck(id Pgid) { + Assert(p.id == id, "Page expected to be: %v, but self identifies as %v", id, p.id) + // Only one flag of page-type can be set. + Assert(p.IsBranchPage() || + p.IsLeafPage() || + p.IsMetaPage() || + p.IsFreelistPage(), + "page %v: has unexpected type/flags: %x", p.id, p.flags) +} + +// LeafPageElement retrieves the leaf node by index +func (p *Page) LeafPageElement(index uint16) *leafPageElement { + return (*leafPageElement)(UnsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), + LeafPageElementSize, int(index))) +} + +// LeafPageElements retrieves a list of leaf nodes. +func (p *Page) LeafPageElements() []leafPageElement { + if p.count == 0 { + return nil + } + data := UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + elems := unsafe.Slice((*leafPageElement)(data), int(p.count)) + return elems +} + +// BranchPageElement retrieves the branch node by index +func (p *Page) BranchPageElement(index uint16) *branchPageElement { + return (*branchPageElement)(UnsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), + unsafe.Sizeof(branchPageElement{}), int(index))) +} + +// BranchPageElements retrieves a list of branch nodes. +func (p *Page) BranchPageElements() []branchPageElement { + if p.count == 0 { + return nil + } + data := UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + elems := unsafe.Slice((*branchPageElement)(data), int(p.count)) + return elems +} + +func (p *Page) FreelistPageCount() (int, int) { + Assert(p.IsFreelistPage(), fmt.Sprintf("can't get freelist page count from a non-freelist page: %2x", p.flags)) + + // If the page.count is at the max uint16 value (64k) then it's considered + // an overflow and the size of the freelist is stored as the first element. + var idx, count = 0, int(p.count) + if count == 0xFFFF { + idx = 1 + c := *(*Pgid)(UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) + count = int(c) + if count < 0 { + panic(fmt.Sprintf("leading element count %d overflows int", c)) + } + } + + return idx, count +} + +func (p *Page) FreelistPageIds() []Pgid { + Assert(p.IsFreelistPage(), fmt.Sprintf("can't get freelist page IDs from a non-freelist page: %2x", p.flags)) + + idx, count := p.FreelistPageCount() + + if count == 0 { + return nil + } + + data := UnsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), pgidSize, idx) + ids := unsafe.Slice((*Pgid)(data), count) + + return ids +} + +// dump writes n bytes of the page to STDERR as hex output. 
+func (p *Page) hexdump(n int) { + buf := UnsafeByteSlice(unsafe.Pointer(p), 0, 0, n) + fmt.Fprintf(os.Stderr, "%x\n", buf) +} + +func (p *Page) PageElementSize() uintptr { + if p.IsLeafPage() { + return LeafPageElementSize + } + return BranchPageElementSize +} + +func (p *Page) Id() Pgid { + return p.id +} + +func (p *Page) SetId(target Pgid) { + p.id = target +} + +func (p *Page) Flags() uint16 { + return p.flags +} + +func (p *Page) SetFlags(v uint16) { + p.flags = v +} + +func (p *Page) Count() uint16 { + return p.count +} + +func (p *Page) SetCount(target uint16) { + p.count = target +} + +func (p *Page) Overflow() uint32 { + return p.overflow +} + +func (p *Page) SetOverflow(target uint32) { + p.overflow = target +} + +func (p *Page) String() string { + return fmt.Sprintf("ID: %d, Type: %s, count: %d, overflow: %d", p.id, p.Typ(), p.count, p.overflow) +} + +type Pages []*Page + +func (s Pages) Len() int { return len(s) } +func (s Pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s Pages) Less(i, j int) bool { return s[i].id < s[j].id } + +// branchPageElement represents a node on a branch page. +type branchPageElement struct { + pos uint32 + ksize uint32 + pgid Pgid +} + +func (n *branchPageElement) Pos() uint32 { + return n.pos +} + +func (n *branchPageElement) SetPos(v uint32) { + n.pos = v +} + +func (n *branchPageElement) Ksize() uint32 { + return n.ksize +} + +func (n *branchPageElement) SetKsize(v uint32) { + n.ksize = v +} + +func (n *branchPageElement) Pgid() Pgid { + return n.pgid +} + +func (n *branchPageElement) SetPgid(v Pgid) { + n.pgid = v +} + +// Key returns a byte slice of the node key. +func (n *branchPageElement) Key() []byte { + return UnsafeByteSlice(unsafe.Pointer(n), 0, int(n.pos), int(n.pos)+int(n.ksize)) +} + +// leafPageElement represents a node on a leaf page. +type leafPageElement struct { + flags uint32 + pos uint32 + ksize uint32 + vsize uint32 +} + +func NewLeafPageElement(flags, pos, ksize, vsize uint32) *leafPageElement { + return &leafPageElement{ + flags: flags, + pos: pos, + ksize: ksize, + vsize: vsize, + } +} + +func (n *leafPageElement) Flags() uint32 { + return n.flags +} + +func (n *leafPageElement) SetFlags(v uint32) { + n.flags = v +} + +func (n *leafPageElement) Pos() uint32 { + return n.pos +} + +func (n *leafPageElement) SetPos(v uint32) { + n.pos = v +} + +func (n *leafPageElement) Ksize() uint32 { + return n.ksize +} + +func (n *leafPageElement) SetKsize(v uint32) { + n.ksize = v +} + +func (n *leafPageElement) Vsize() uint32 { + return n.vsize +} + +func (n *leafPageElement) SetVsize(v uint32) { + n.vsize = v +} + +// Key returns a byte slice of the node key. +func (n *leafPageElement) Key() []byte { + i := int(n.pos) + j := i + int(n.ksize) + return UnsafeByteSlice(unsafe.Pointer(n), 0, i, j) +} + +// Value returns a byte slice of the node value. +func (n *leafPageElement) Value() []byte { + i := int(n.pos) + int(n.ksize) + j := i + int(n.vsize) + return UnsafeByteSlice(unsafe.Pointer(n), 0, i, j) +} + +func (n *leafPageElement) IsBucketEntry() bool { + return n.flags&uint32(BucketLeafFlag) != 0 +} + +func (n *leafPageElement) Bucket() *InBucket { + if n.IsBucketEntry() { + return LoadBucket(n.Value()) + } else { + return nil + } +} + +// PageInfo represents human readable information about a page. 
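The Key and Value accessors above locate data purely by relative offsets: pos is measured from the element header itself, the key occupies ksize bytes at that offset, and the value follows immediately for vsize bytes. The same arithmetic with ordinary byte slices, as a rough illustration only (field and helper names are made up):

package main

import "fmt"

// element carries the same pos/ksize/vsize bookkeeping as a leaf page element.
type element struct {
    pos, ksize, vsize uint32
}

// key and value slice the data out of a buffer that starts at the element header.
func key(e element, fromElem []byte) []byte {
    return fromElem[e.pos : e.pos+e.ksize]
}

func value(e element, fromElem []byte) []byte {
    return fromElem[e.pos+e.ksize : e.pos+e.ksize+e.vsize]
}

func main() {
    // 8 bytes stand in for the element header, then the key "foo" and value "bar".
    buf := append(make([]byte, 8), []byte("foobar")...)
    e := element{pos: 8, ksize: 3, vsize: 3}
    fmt.Printf("key=%s value=%s\n", key(e, buf), value(e, buf))
}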
+type PageInfo struct { + ID int + Type string + Count int + OverflowCount int +} + +type Pgids []Pgid + +func (s Pgids) Len() int { return len(s) } +func (s Pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s Pgids) Less(i, j int) bool { return s[i] < s[j] } + +// Merge returns the sorted union of a and b. +func (a Pgids) Merge(b Pgids) Pgids { + // Return the opposite slice if one is nil. + if len(a) == 0 { + return b + } + if len(b) == 0 { + return a + } + merged := make(Pgids, len(a)+len(b)) + Mergepgids(merged, a, b) + return merged +} + +// Mergepgids copies the sorted union of a and b into dst. +// If dst is too small, it panics. +func Mergepgids(dst, a, b Pgids) { + if len(dst) < len(a)+len(b) { + panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b))) + } + // Copy in the opposite slice if one is nil. + if len(a) == 0 { + copy(dst, b) + return + } + if len(b) == 0 { + copy(dst, a) + return + } + + // Merged will hold all elements from both lists. + merged := dst[:0] + + // Assign lead to the slice with a lower starting value, follow to the higher value. + lead, follow := a, b + if b[0] < a[0] { + lead, follow = b, a + } + + // Continue while there are elements in the lead. + for len(lead) > 0 { + // Merge largest prefix of lead that is ahead of follow[0]. + n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) + merged = append(merged, lead[:n]...) + if n >= len(lead) { + break + } + + // Swap lead and follow. + lead, follow = follow, lead[n:] + } + + // Append what's left in follow. + _ = append(merged, follow...) +} diff --git a/vendor/go.etcd.io/bbolt/internal/common/types.go b/vendor/go.etcd.io/bbolt/internal/common/types.go new file mode 100644 index 0000000000000..8ad8279a090a6 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/types.go @@ -0,0 +1,40 @@ +package common + +import ( + "os" + "runtime" + "time" +) + +// MaxMmapStep is the largest step that can be taken when remapping the mmap. +const MaxMmapStep = 1 << 30 // 1GB + +// Version represents the data file format version. +const Version uint32 = 2 + +// Magic represents a marker value to indicate that a file is a Bolt DB. +const Magic uint32 = 0xED0CDAED + +const PgidNoFreelist Pgid = 0xffffffffffffffff + +// DO NOT EDIT. Copied from the "bolt" package. +const pageMaxAllocSize = 0xFFFFFFF + +// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when +// syncing changes to a file. This is required as some operating systems, +// such as OpenBSD, do not have a unified buffer cache (UBC) and writes +// must be synchronized using the msync(2) syscall. +const IgnoreNoSync = runtime.GOOS == "openbsd" + +// Default values if not set in a DB instance. +const ( + DefaultMaxBatchSize int = 1000 + DefaultMaxBatchDelay = 10 * time.Millisecond + DefaultAllocSize = 16 * 1024 * 1024 +) + +// DefaultPageSize is the default page size for db which is set to the OS page size. +var DefaultPageSize = os.Getpagesize() + +// Txid represents the internal transaction identifier. 
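Mergepgids above merges two already-sorted id lists by repeatedly copying the longest prefix of the leading list that stays at or below the head of the other, then swapping roles. A self-contained sketch of the same approach on plain uint64 slices (the helper name is made up):

package main

import (
    "fmt"
    "sort"
)

// mergeSorted returns the sorted union of two sorted slices, mirroring the
// lead/follow strategy of Mergepgids.
func mergeSorted(a, b []uint64) []uint64 {
    if len(a) == 0 {
        return b
    }
    if len(b) == 0 {
        return a
    }
    dst := make([]uint64, 0, len(a)+len(b))
    lead, follow := a, b
    if b[0] < a[0] {
        lead, follow = b, a
    }
    for len(lead) > 0 {
        // Copy the prefix of lead that does not exceed follow's head.
        n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
        dst = append(dst, lead[:n]...)
        if n >= len(lead) {
            break
        }
        lead, follow = follow, lead[n:]
    }
    return append(dst, follow...)
}

func main() {
    fmt.Println(mergeSorted([]uint64{2, 5, 9}, []uint64{3, 4, 10})) // [2 3 4 5 9 10]
}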
+type Txid uint64 diff --git a/vendor/go.etcd.io/bbolt/unsafe.go b/vendor/go.etcd.io/bbolt/internal/common/unsafe.go similarity index 74% rename from vendor/go.etcd.io/bbolt/unsafe.go rename to vendor/go.etcd.io/bbolt/internal/common/unsafe.go index 7745d32ce194f..9b77dd7b2b807 100644 --- a/vendor/go.etcd.io/bbolt/unsafe.go +++ b/vendor/go.etcd.io/bbolt/internal/common/unsafe.go @@ -1,18 +1,18 @@ -package bbolt +package common import ( "unsafe" ) -func unsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer { +func UnsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer { return unsafe.Pointer(uintptr(base) + offset) } -func unsafeIndex(base unsafe.Pointer, offset uintptr, elemsz uintptr, n int) unsafe.Pointer { +func UnsafeIndex(base unsafe.Pointer, offset uintptr, elemsz uintptr, n int) unsafe.Pointer { return unsafe.Pointer(uintptr(base) + offset + uintptr(n)*elemsz) } -func unsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte { +func UnsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte { // See: https://github.com/golang/go/wiki/cgo#turning-c-arrays-into-go-slices // // This memory is not allocated from C, but it is unmanaged by Go's @@ -23,5 +23,5 @@ func unsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte { // index 0. However, the wiki never says that the address must be to // the beginning of a C allocation (or even that malloc was used at // all), so this is believed to be correct. - return (*[maxAllocSize]byte)(unsafeAdd(base, offset))[i:j:j] + return (*[pageMaxAllocSize]byte)(UnsafeAdd(base, offset))[i:j:j] } diff --git a/vendor/go.etcd.io/bbolt/internal/common/utils.go b/vendor/go.etcd.io/bbolt/internal/common/utils.go new file mode 100644 index 0000000000000..bdf82a7b00bc3 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/utils.go @@ -0,0 +1,64 @@ +package common + +import ( + "fmt" + "io" + "os" + "unsafe" +) + +func LoadBucket(buf []byte) *InBucket { + return (*InBucket)(unsafe.Pointer(&buf[0])) +} + +func LoadPage(buf []byte) *Page { + return (*Page)(unsafe.Pointer(&buf[0])) +} + +func LoadPageMeta(buf []byte) *Meta { + return (*Meta)(unsafe.Pointer(&buf[PageHeaderSize])) +} + +func CopyFile(srcPath, dstPath string) error { + // Ensure source file exists. + _, err := os.Stat(srcPath) + if os.IsNotExist(err) { + return fmt.Errorf("source file %q not found", srcPath) + } else if err != nil { + return err + } + + // Ensure output file not exist. 
+ _, err = os.Stat(dstPath) + if err == nil { + return fmt.Errorf("output file %q already exists", dstPath) + } else if !os.IsNotExist(err) { + return err + } + + srcDB, err := os.Open(srcPath) + if err != nil { + return fmt.Errorf("failed to open source file %q: %w", srcPath, err) + } + defer srcDB.Close() + dstDB, err := os.Create(dstPath) + if err != nil { + return fmt.Errorf("failed to create output file %q: %w", dstPath, err) + } + defer dstDB.Close() + written, err := io.Copy(dstDB, srcDB) + if err != nil { + return fmt.Errorf("failed to copy database file from %q to %q: %w", srcPath, dstPath, err) + } + + srcFi, err := srcDB.Stat() + if err != nil { + return fmt.Errorf("failed to get source file info %q: %w", srcPath, err) + } + initialSize := srcFi.Size() + if initialSize != written { + return fmt.Errorf("the byte copied (%q: %d) isn't equal to the initial db size (%q: %d)", dstPath, written, srcPath, initialSize) + } + + return nil +} diff --git a/vendor/go.etcd.io/bbolt/internal/common/verify.go b/vendor/go.etcd.io/bbolt/internal/common/verify.go new file mode 100644 index 0000000000000..eac95e26301bc --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/common/verify.go @@ -0,0 +1,67 @@ +// Copied from https://github.com/etcd-io/etcd/blob/main/client/pkg/verify/verify.go +package common + +import ( + "fmt" + "os" + "strings" +) + +const ENV_VERIFY = "BBOLT_VERIFY" + +type VerificationType string + +const ( + ENV_VERIFY_VALUE_ALL VerificationType = "all" + ENV_VERIFY_VALUE_ASSERT VerificationType = "assert" +) + +func getEnvVerify() string { + return strings.ToLower(os.Getenv(ENV_VERIFY)) +} + +func IsVerificationEnabled(verification VerificationType) bool { + env := getEnvVerify() + return env == string(ENV_VERIFY_VALUE_ALL) || env == strings.ToLower(string(verification)) +} + +// EnableVerifications sets `ENV_VERIFY` and returns a function that +// can be used to bring the original settings. +func EnableVerifications(verification VerificationType) func() { + previousEnv := getEnvVerify() + os.Setenv(ENV_VERIFY, string(verification)) + return func() { + os.Setenv(ENV_VERIFY, previousEnv) + } +} + +// EnableAllVerifications enables verification and returns a function +// that can be used to bring the original settings. +func EnableAllVerifications() func() { + return EnableVerifications(ENV_VERIFY_VALUE_ALL) +} + +// DisableVerifications unsets `ENV_VERIFY` and returns a function that +// can be used to bring the original settings. +func DisableVerifications() func() { + previousEnv := getEnvVerify() + os.Unsetenv(ENV_VERIFY) + return func() { + os.Setenv(ENV_VERIFY, previousEnv) + } +} + +// Verify performs verification if the assertions are enabled. +// In the default setup running in tests and skipped in the production code. +func Verify(f func()) { + if IsVerificationEnabled(ENV_VERIFY_VALUE_ASSERT) { + f() + } +} + +// Assert will panic with a given formatted message if the given condition is false. +func Assert(condition bool, msg string, v ...any) { + if !condition { + panic(fmt.Sprintf("assertion failed: "+msg, v...)) + } +} diff --git a/vendor/go.etcd.io/bbolt/internal/freelist/array.go b/vendor/go.etcd.io/bbolt/internal/freelist/array.go new file mode 100644 index 0000000000000..0cc1ba7150311 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/freelist/array.go @@ -0,0 +1,108 @@ +package freelist + +import ( + "fmt" + "sort" + + "go.etcd.io/bbolt/internal/common" +) + +type array struct { + *shared + + ids []common.Pgid // all free and available free page ids. 
+} + +func (f *array) Init(ids common.Pgids) { + f.ids = ids + f.reindex() +} + +func (f *array) Allocate(txid common.Txid, n int) common.Pgid { + if len(f.ids) == 0 { + return 0 + } + + var initial, previd common.Pgid + for i, id := range f.ids { + if id <= 1 { + panic(fmt.Sprintf("invalid page allocation: %d", id)) + } + + // Reset initial page if this is not contiguous. + if previd == 0 || id-previd != 1 { + initial = id + } + + // If we found a contiguous block then remove it and return it. + if (id-initial)+1 == common.Pgid(n) { + // If we're allocating off the beginning then take the fast path + // and just adjust the existing slice. This will use extra memory + // temporarily but the append() in free() will realloc the slice + // as is necessary. + if (i + 1) == n { + f.ids = f.ids[i+1:] + } else { + copy(f.ids[i-n+1:], f.ids[i+1:]) + f.ids = f.ids[:len(f.ids)-n] + } + + // Remove from the free cache. + for i := common.Pgid(0); i < common.Pgid(n); i++ { + delete(f.cache, initial+i) + } + f.allocs[initial] = txid + return initial + } + + previd = id + } + return 0 +} + +func (f *array) FreeCount() int { + return len(f.ids) +} + +func (f *array) freePageIds() common.Pgids { + return f.ids +} + +func (f *array) mergeSpans(ids common.Pgids) { + sort.Sort(ids) + common.Verify(func() { + idsIdx := make(map[common.Pgid]struct{}) + for _, id := range f.ids { + // The existing f.ids shouldn't have duplicated free ID. + if _, ok := idsIdx[id]; ok { + panic(fmt.Sprintf("detected duplicated free page ID: %d in existing f.ids: %v", id, f.ids)) + } + idsIdx[id] = struct{}{} + } + + prev := common.Pgid(0) + for _, id := range ids { + // The ids shouldn't have duplicated free ID. Note page 0 and 1 + // are reserved for meta pages, so they can never be free page IDs. + if prev == id { + panic(fmt.Sprintf("detected duplicated free ID: %d in ids: %v", id, ids)) + } + prev = id + + // The ids shouldn't have any overlap with the existing f.ids. + if _, ok := idsIdx[id]; ok { + panic(fmt.Sprintf("detected overlapped free page ID: %d between ids: %v and existing f.ids: %v", id, ids, f.ids)) + } + } + }) + f.ids = common.Pgids(f.ids).Merge(ids) +} + +func NewArrayFreelist() Interface { + a := &array{ + shared: newShared(), + ids: []common.Pgid{}, + } + a.Interface = a + return a +} diff --git a/vendor/go.etcd.io/bbolt/internal/freelist/freelist.go b/vendor/go.etcd.io/bbolt/internal/freelist/freelist.go new file mode 100644 index 0000000000000..2b819506bda64 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/freelist/freelist.go @@ -0,0 +1,82 @@ +package freelist + +import ( + "go.etcd.io/bbolt/internal/common" +) + +type ReadWriter interface { + // Read calls Init with the page ids stored in the given page. + Read(page *common.Page) + + // Write writes the freelist into the given page. + Write(page *common.Page) + + // EstimatedWritePageSize returns the size in bytes of the freelist after serialization in Write. + // This should never underestimate the size. + EstimatedWritePageSize() int +} + +type Interface interface { + ReadWriter + + // Init initializes this freelist with the given list of pages. + Init(ids common.Pgids) + + // Allocate tries to allocate the given number of contiguous pages + // from the free list pages. It returns the starting page ID if + // available; otherwise, it returns 0. + Allocate(txid common.Txid, numPages int) common.Pgid + + // Count returns the number of free and pending pages. + Count() int + + // FreeCount returns the number of free pages. 
+ FreeCount() int + + // PendingCount returns the number of pending pages. + PendingCount() int + + // AddReadonlyTXID adds a given read-only transaction id for pending page tracking. + AddReadonlyTXID(txid common.Txid) + + // RemoveReadonlyTXID removes a given read-only transaction id for pending page tracking. + RemoveReadonlyTXID(txid common.Txid) + + // ReleasePendingPages releases any pages associated with closed read-only transactions. + ReleasePendingPages() + + // Free releases a page and its overflow for a given transaction id. + // If the page is already free or is one of the meta pages, then a panic will occur. + Free(txId common.Txid, p *common.Page) + + // Freed returns whether a given page is in the free list. + Freed(pgId common.Pgid) bool + + // Rollback removes the pages from a given pending tx. + Rollback(txId common.Txid) + + // Copyall copies a list of all free ids and all pending ids in one sorted list. + // f.count returns the minimum length required for dst. + Copyall(dst []common.Pgid) + + // Reload reads the freelist from a page and filters out pending items. + Reload(p *common.Page) + + // NoSyncReload reads the freelist from Pgids and filters out pending items. + NoSyncReload(pgIds common.Pgids) + + // freePageIds returns the IDs of all free pages. Returns an empty slice if no free pages are available. + freePageIds() common.Pgids + + // pendingPageIds returns all pending pages by transaction id. + pendingPageIds() map[common.Txid]*txPending + + // release moves all page ids for a transaction id (or older) to the freelist. + release(txId common.Txid) + + // releaseRange moves pending pages allocated within an extent [begin,end] to the free list. + releaseRange(begin, end common.Txid) + + // mergeSpans is merging the given pages into the freelist + mergeSpans(ids common.Pgids) +} diff --git a/vendor/go.etcd.io/bbolt/internal/freelist/hashmap.go b/vendor/go.etcd.io/bbolt/internal/freelist/hashmap.go new file mode 100644 index 0000000000000..8d471f4b5bf1a --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/freelist/hashmap.go @@ -0,0 +1,292 @@ +package freelist + +import ( + "fmt" + "reflect" + "sort" + + "go.etcd.io/bbolt/internal/common" +) + +// pidSet holds the set of starting pgids which have the same span size +type pidSet map[common.Pgid]struct{} + +type hashMap struct { + *shared + + freePagesCount uint64 // count of free pages(hashmap version) + freemaps map[uint64]pidSet // key is the size of continuous pages(span), value is a set which contains the starting pgids of same size + forwardMap map[common.Pgid]uint64 // key is start pgid, value is its span size + backwardMap map[common.Pgid]uint64 // key is end pgid, value is its span size +} + +func (f *hashMap) Init(pgids common.Pgids) { + // reset the counter when freelist init + f.freePagesCount = 0 + f.freemaps = make(map[uint64]pidSet) + f.forwardMap = make(map[common.Pgid]uint64) + f.backwardMap = make(map[common.Pgid]uint64) + + if len(pgids) == 0 { + return + } + + if !sort.SliceIsSorted([]common.Pgid(pgids), func(i, j int) bool { return pgids[i] < pgids[j] }) { + panic("pgids not sorted") + } + + size := uint64(1) + start := pgids[0] + + for i := 1; i < len(pgids); i++ { + // continuous page + if pgids[i] == pgids[i-1]+1 { + size++ + } else { + f.addSpan(start, size) + + size = 1 + start = pgids[i] + } + } + + // init the tail + if size != 0 && start != 0 { + f.addSpan(start, size) + } + + f.reindex() +} + +func (f *hashMap) Allocate(txid common.Txid, n int) common.Pgid { + if n == 0 { + return 0 + } 
+ + // if we have a exact size match just return short path + if bm, ok := f.freemaps[uint64(n)]; ok { + for pid := range bm { + // remove the span + f.delSpan(pid, uint64(n)) + + f.allocs[pid] = txid + + for i := common.Pgid(0); i < common.Pgid(n); i++ { + delete(f.cache, pid+i) + } + return pid + } + } + + // lookup the map to find larger span + for size, bm := range f.freemaps { + if size < uint64(n) { + continue + } + + for pid := range bm { + // remove the initial + f.delSpan(pid, size) + + f.allocs[pid] = txid + + remain := size - uint64(n) + + // add remain span + f.addSpan(pid+common.Pgid(n), remain) + + for i := common.Pgid(0); i < common.Pgid(n); i++ { + delete(f.cache, pid+i) + } + return pid + } + } + + return 0 +} + +func (f *hashMap) FreeCount() int { + common.Verify(func() { + expectedFreePageCount := f.hashmapFreeCountSlow() + common.Assert(int(f.freePagesCount) == expectedFreePageCount, + "freePagesCount (%d) is out of sync with free pages map (%d)", f.freePagesCount, expectedFreePageCount) + }) + return int(f.freePagesCount) +} + +func (f *hashMap) freePageIds() common.Pgids { + count := f.FreeCount() + if count == 0 { + return common.Pgids{} + } + + m := make([]common.Pgid, 0, count) + + startPageIds := make([]common.Pgid, 0, len(f.forwardMap)) + for k := range f.forwardMap { + startPageIds = append(startPageIds, k) + } + sort.Sort(common.Pgids(startPageIds)) + + for _, start := range startPageIds { + if size, ok := f.forwardMap[start]; ok { + for i := 0; i < int(size); i++ { + m = append(m, start+common.Pgid(i)) + } + } + } + + return m +} + +func (f *hashMap) hashmapFreeCountSlow() int { + count := 0 + for _, size := range f.forwardMap { + count += int(size) + } + return count +} + +func (f *hashMap) addSpan(start common.Pgid, size uint64) { + f.backwardMap[start-1+common.Pgid(size)] = size + f.forwardMap[start] = size + if _, ok := f.freemaps[size]; !ok { + f.freemaps[size] = make(map[common.Pgid]struct{}) + } + + f.freemaps[size][start] = struct{}{} + f.freePagesCount += size +} + +func (f *hashMap) delSpan(start common.Pgid, size uint64) { + delete(f.forwardMap, start) + delete(f.backwardMap, start+common.Pgid(size-1)) + delete(f.freemaps[size], start) + if len(f.freemaps[size]) == 0 { + delete(f.freemaps, size) + } + f.freePagesCount -= size +} + +func (f *hashMap) mergeSpans(ids common.Pgids) { + common.Verify(func() { + ids1Freemap := f.idsFromFreemaps() + ids2Forward := f.idsFromForwardMap() + ids3Backward := f.idsFromBackwardMap() + + if !reflect.DeepEqual(ids1Freemap, ids2Forward) { + panic(fmt.Sprintf("Detected mismatch, f.freemaps: %v, f.forwardMap: %v", f.freemaps, f.forwardMap)) + } + if !reflect.DeepEqual(ids1Freemap, ids3Backward) { + panic(fmt.Sprintf("Detected mismatch, f.freemaps: %v, f.backwardMap: %v", f.freemaps, f.backwardMap)) + } + + sort.Sort(ids) + prev := common.Pgid(0) + for _, id := range ids { + // The ids shouldn't have duplicated free ID. + if prev == id { + panic(fmt.Sprintf("detected duplicated free ID: %d in ids: %v", id, ids)) + } + prev = id + + // The ids shouldn't have any overlap with the existing f.freemaps. 
+ if _, ok := ids1Freemap[id]; ok { + panic(fmt.Sprintf("detected overlapped free page ID: %d between ids: %v and existing f.freemaps: %v", id, ids, f.freemaps)) + } + } + }) + for _, id := range ids { + // try to see if we can merge and update + f.mergeWithExistingSpan(id) + } +} + +// mergeWithExistingSpan merges pid to the existing free spans, try to merge it backward and forward +func (f *hashMap) mergeWithExistingSpan(pid common.Pgid) { + prev := pid - 1 + next := pid + 1 + + preSize, mergeWithPrev := f.backwardMap[prev] + nextSize, mergeWithNext := f.forwardMap[next] + newStart := pid + newSize := uint64(1) + + if mergeWithPrev { + //merge with previous span + start := prev + 1 - common.Pgid(preSize) + f.delSpan(start, preSize) + + newStart -= common.Pgid(preSize) + newSize += preSize + } + + if mergeWithNext { + // merge with next span + f.delSpan(next, nextSize) + newSize += nextSize + } + + f.addSpan(newStart, newSize) +} + +// idsFromFreemaps get all free page IDs from f.freemaps. +// used by test only. +func (f *hashMap) idsFromFreemaps() map[common.Pgid]struct{} { + ids := make(map[common.Pgid]struct{}) + for size, idSet := range f.freemaps { + for start := range idSet { + for i := 0; i < int(size); i++ { + id := start + common.Pgid(i) + if _, ok := ids[id]; ok { + panic(fmt.Sprintf("detected duplicated free page ID: %d in f.freemaps: %v", id, f.freemaps)) + } + ids[id] = struct{}{} + } + } + } + return ids +} + +// idsFromForwardMap get all free page IDs from f.forwardMap. +// used by test only. +func (f *hashMap) idsFromForwardMap() map[common.Pgid]struct{} { + ids := make(map[common.Pgid]struct{}) + for start, size := range f.forwardMap { + for i := 0; i < int(size); i++ { + id := start + common.Pgid(i) + if _, ok := ids[id]; ok { + panic(fmt.Sprintf("detected duplicated free page ID: %d in f.forwardMap: %v", id, f.forwardMap)) + } + ids[id] = struct{}{} + } + } + return ids +} + +// idsFromBackwardMap get all free page IDs from f.backwardMap. +// used by test only. +func (f *hashMap) idsFromBackwardMap() map[common.Pgid]struct{} { + ids := make(map[common.Pgid]struct{}) + for end, size := range f.backwardMap { + for i := 0; i < int(size); i++ { + id := end - common.Pgid(i) + if _, ok := ids[id]; ok { + panic(fmt.Sprintf("detected duplicated free page ID: %d in f.backwardMap: %v", id, f.backwardMap)) + } + ids[id] = struct{}{} + } + } + return ids +} + +func NewHashMapFreelist() Interface { + hm := &hashMap{ + shared: newShared(), + freemaps: make(map[uint64]pidSet), + forwardMap: make(map[common.Pgid]uint64), + backwardMap: make(map[common.Pgid]uint64), + } + hm.Interface = hm + return hm +} diff --git a/vendor/go.etcd.io/bbolt/internal/freelist/shared.go b/vendor/go.etcd.io/bbolt/internal/freelist/shared.go new file mode 100644 index 0000000000000..f2d1130083f37 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/internal/freelist/shared.go @@ -0,0 +1,310 @@ +package freelist + +import ( + "fmt" + "math" + "sort" + "unsafe" + + "go.etcd.io/bbolt/internal/common" +) + +type txPending struct { + ids []common.Pgid + alloctx []common.Txid // txids allocating the ids + lastReleaseBegin common.Txid // beginning txid of last matching releaseRange +} + +type shared struct { + Interface + + readonlyTXIDs []common.Txid // all readonly transaction IDs. + allocs map[common.Pgid]common.Txid // mapping of Txid that allocated a pgid. + cache map[common.Pgid]struct{} // fast lookup of all free and pending page ids. + pending map[common.Txid]*txPending // mapping of soon-to-be free page ids by tx. 
+} + +func newShared() *shared { + return &shared{ + pending: make(map[common.Txid]*txPending), + allocs: make(map[common.Pgid]common.Txid), + cache: make(map[common.Pgid]struct{}), + } +} + +func (t *shared) pendingPageIds() map[common.Txid]*txPending { + return t.pending +} + +func (t *shared) PendingCount() int { + var count int + for _, txp := range t.pending { + count += len(txp.ids) + } + return count +} + +func (t *shared) Count() int { + return t.FreeCount() + t.PendingCount() +} + +func (t *shared) Freed(pgId common.Pgid) bool { + _, ok := t.cache[pgId] + return ok +} + +func (t *shared) Free(txid common.Txid, p *common.Page) { + if p.Id() <= 1 { + panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.Id())) + } + + // Free page and all its overflow pages. + txp := t.pending[txid] + if txp == nil { + txp = &txPending{} + t.pending[txid] = txp + } + allocTxid, ok := t.allocs[p.Id()] + common.Verify(func() { + if allocTxid == txid { + panic(fmt.Sprintf("free: freed page (%d) was allocated by the same transaction (%d)", p.Id(), txid)) + } + }) + if ok { + delete(t.allocs, p.Id()) + } + + for id := p.Id(); id <= p.Id()+common.Pgid(p.Overflow()); id++ { + // Verify that page is not already free. + if _, ok := t.cache[id]; ok { + panic(fmt.Sprintf("page %d already freed", id)) + } + // Add to the freelist and cache. + txp.ids = append(txp.ids, id) + txp.alloctx = append(txp.alloctx, allocTxid) + t.cache[id] = struct{}{} + } +} + +func (t *shared) Rollback(txid common.Txid) { + // Remove page ids from cache. + txp := t.pending[txid] + if txp == nil { + return + } + for i, pgid := range txp.ids { + delete(t.cache, pgid) + tx := txp.alloctx[i] + if tx == 0 { + continue + } + if tx != txid { + // Pending free aborted; restore page back to alloc list. + t.allocs[pgid] = tx + } else { + // A writing TXN should never free a page which was allocated by itself. + panic(fmt.Sprintf("rollback: freed page (%d) was allocated by the same transaction (%d)", pgid, txid)) + } + } + // Remove pages from pending list and mark as free if allocated by txid. + delete(t.pending, txid) + + // Remove pgids which are allocated by this txid + for pgid, tid := range t.allocs { + if tid == txid { + delete(t.allocs, pgid) + } + } +} + +func (t *shared) AddReadonlyTXID(tid common.Txid) { + t.readonlyTXIDs = append(t.readonlyTXIDs, tid) +} + +func (t *shared) RemoveReadonlyTXID(tid common.Txid) { + for i := range t.readonlyTXIDs { + if t.readonlyTXIDs[i] == tid { + last := len(t.readonlyTXIDs) - 1 + t.readonlyTXIDs[i] = t.readonlyTXIDs[last] + t.readonlyTXIDs = t.readonlyTXIDs[:last] + break + } + } +} + +type txIDx []common.Txid + +func (t txIDx) Len() int { return len(t) } +func (t txIDx) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t txIDx) Less(i, j int) bool { return t[i] < t[j] } + +func (t *shared) ReleasePendingPages() { + // Free all pending pages prior to the earliest open transaction. + sort.Sort(txIDx(t.readonlyTXIDs)) + minid := common.Txid(math.MaxUint64) + if len(t.readonlyTXIDs) > 0 { + minid = t.readonlyTXIDs[0] + } + if minid > 0 { + t.release(minid - 1) + } + // Release unused txid extents. + for _, tid := range t.readonlyTXIDs { + t.releaseRange(minid, tid-1) + minid = tid + 1 + } + t.releaseRange(minid, common.Txid(math.MaxUint64)) + // Any page both allocated and freed in an extent is safe to release. 
+} + +func (t *shared) release(txid common.Txid) { + m := make(common.Pgids, 0) + for tid, txp := range t.pending { + if tid <= txid { + // Move transaction's pending pages to the available freelist. + // Don't remove from the cache since the page is still free. + m = append(m, txp.ids...) + delete(t.pending, tid) + } + } + t.mergeSpans(m) +} + +func (t *shared) releaseRange(begin, end common.Txid) { + if begin > end { + return + } + m := common.Pgids{} + for tid, txp := range t.pending { + if tid < begin || tid > end { + continue + } + // Don't recompute freed pages if ranges haven't updated. + if txp.lastReleaseBegin == begin { + continue + } + for i := 0; i < len(txp.ids); i++ { + if atx := txp.alloctx[i]; atx < begin || atx > end { + continue + } + m = append(m, txp.ids[i]) + txp.ids[i] = txp.ids[len(txp.ids)-1] + txp.ids = txp.ids[:len(txp.ids)-1] + txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1] + txp.alloctx = txp.alloctx[:len(txp.alloctx)-1] + i-- + } + txp.lastReleaseBegin = begin + if len(txp.ids) == 0 { + delete(t.pending, tid) + } + } + t.mergeSpans(m) +} + +// Copyall copies a list of all free ids and all pending ids in one sorted list. +// f.count returns the minimum length required for dst. +func (t *shared) Copyall(dst []common.Pgid) { + m := make(common.Pgids, 0, t.PendingCount()) + for _, txp := range t.pendingPageIds() { + m = append(m, txp.ids...) + } + sort.Sort(m) + common.Mergepgids(dst, t.freePageIds(), m) +} + +func (t *shared) Reload(p *common.Page) { + t.Read(p) + t.NoSyncReload(t.freePageIds()) +} + +func (t *shared) NoSyncReload(pgIds common.Pgids) { + // Build a cache of only pending pages. + pcache := make(map[common.Pgid]bool) + for _, txp := range t.pending { + for _, pendingID := range txp.ids { + pcache[pendingID] = true + } + } + + // Check each page in the freelist and build a new available freelist + // with any pages not in the pending lists. + a := []common.Pgid{} + for _, id := range pgIds { + if !pcache[id] { + a = append(a, id) + } + } + + t.Init(a) +} + +// reindex rebuilds the free cache based on available and pending free lists. +func (t *shared) reindex() { + free := t.freePageIds() + pending := t.pendingPageIds() + t.cache = make(map[common.Pgid]struct{}, len(free)) + for _, id := range free { + t.cache[id] = struct{}{} + } + for _, txp := range pending { + for _, pendingID := range txp.ids { + t.cache[pendingID] = struct{}{} + } + } +} + +func (t *shared) Read(p *common.Page) { + if !p.IsFreelistPage() { + panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.Id(), p.Typ())) + } + + ids := p.FreelistPageIds() + + // Copy the list of page ids from the freelist. + if len(ids) == 0 { + t.Init([]common.Pgid{}) + } else { + // copy the ids, so we don't modify on the freelist page directly + idsCopy := make([]common.Pgid, len(ids)) + copy(idsCopy, ids) + // Make sure they're sorted. + sort.Sort(common.Pgids(idsCopy)) + + t.Init(idsCopy) + } +} + +func (t *shared) EstimatedWritePageSize() int { + n := t.Count() + if n >= 0xFFFF { + // The first element will be used to store the count. See freelist.write. + n++ + } + return int(common.PageHeaderSize) + (int(unsafe.Sizeof(common.Pgid(0))) * n) +} + +func (t *shared) Write(p *common.Page) { + // Combine the old free pgids and pgids waiting on an open transaction. + + // Update the header flag. + p.SetFlags(common.FreelistPageFlag) + + // The page.count can only hold up to 64k elements so if we overflow that + // number then we handle it by putting the size in the first element. 
+ l := t.Count() + if l == 0 { + p.SetCount(uint16(l)) + } else if l < 0xFFFF { + p.SetCount(uint16(l)) + data := common.UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + ids := unsafe.Slice((*common.Pgid)(data), l) + t.Copyall(ids) + } else { + p.SetCount(0xFFFF) + data := common.UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) + ids := unsafe.Slice((*common.Pgid)(data), l+1) + ids[0] = common.Pgid(l) + t.Copyall(ids[1:]) + } +} diff --git a/vendor/go.etcd.io/bbolt/logger.go b/vendor/go.etcd.io/bbolt/logger.go new file mode 100644 index 0000000000000..fb250894a2956 --- /dev/null +++ b/vendor/go.etcd.io/bbolt/logger.go @@ -0,0 +1,113 @@ +package bbolt + +// See https://github.com/etcd-io/raft/blob/main/logger.go +import ( + "fmt" + "io" + "log" + "os" +) + +type Logger interface { + Debug(v ...interface{}) + Debugf(format string, v ...interface{}) + + Error(v ...interface{}) + Errorf(format string, v ...interface{}) + + Info(v ...interface{}) + Infof(format string, v ...interface{}) + + Warning(v ...interface{}) + Warningf(format string, v ...interface{}) + + Fatal(v ...interface{}) + Fatalf(format string, v ...interface{}) + + Panic(v ...interface{}) + Panicf(format string, v ...interface{}) +} + +func getDiscardLogger() Logger { + return discardLogger +} + +var ( + discardLogger = &DefaultLogger{Logger: log.New(io.Discard, "", 0)} +) + +const ( + calldepth = 2 +) + +// DefaultLogger is a default implementation of the Logger interface. +type DefaultLogger struct { + *log.Logger + debug bool +} + +func (l *DefaultLogger) EnableTimestamps() { + l.SetFlags(l.Flags() | log.Ldate | log.Ltime) +} + +func (l *DefaultLogger) EnableDebug() { + l.debug = true +} + +func (l *DefaultLogger) Debug(v ...interface{}) { + if l.debug { + _ = l.Output(calldepth, header("DEBUG", fmt.Sprint(v...))) + } +} + +func (l *DefaultLogger) Debugf(format string, v ...interface{}) { + if l.debug { + _ = l.Output(calldepth, header("DEBUG", fmt.Sprintf(format, v...))) + } +} + +func (l *DefaultLogger) Info(v ...interface{}) { + _ = l.Output(calldepth, header("INFO", fmt.Sprint(v...))) +} + +func (l *DefaultLogger) Infof(format string, v ...interface{}) { + _ = l.Output(calldepth, header("INFO", fmt.Sprintf(format, v...))) +} + +func (l *DefaultLogger) Error(v ...interface{}) { + _ = l.Output(calldepth, header("ERROR", fmt.Sprint(v...))) +} + +func (l *DefaultLogger) Errorf(format string, v ...interface{}) { + _ = l.Output(calldepth, header("ERROR", fmt.Sprintf(format, v...))) +} + +func (l *DefaultLogger) Warning(v ...interface{}) { + _ = l.Output(calldepth, header("WARN", fmt.Sprint(v...))) +} + +func (l *DefaultLogger) Warningf(format string, v ...interface{}) { + _ = l.Output(calldepth, header("WARN", fmt.Sprintf(format, v...))) +} + +func (l *DefaultLogger) Fatal(v ...interface{}) { + _ = l.Output(calldepth, header("FATAL", fmt.Sprint(v...))) + os.Exit(1) +} + +func (l *DefaultLogger) Fatalf(format string, v ...interface{}) { + _ = l.Output(calldepth, header("FATAL", fmt.Sprintf(format, v...))) + os.Exit(1) +} + +func (l *DefaultLogger) Panic(v ...interface{}) { + l.Logger.Panic(v...) +} + +func (l *DefaultLogger) Panicf(format string, v ...interface{}) { + l.Logger.Panicf(format, v...) 
+} + +func header(lvl, msg string) string { + return fmt.Sprintf("%s: %s", lvl, msg) +} diff --git a/vendor/go.etcd.io/bbolt/mlock_unix.go b/vendor/go.etcd.io/bbolt/mlock_unix.go index 744a972f51a1e..9a0fd332c974f 100644 --- a/vendor/go.etcd.io/bbolt/mlock_unix.go +++ b/vendor/go.etcd.io/bbolt/mlock_unix.go @@ -1,5 +1,4 @@ //go:build !windows -// +build !windows package bbolt diff --git a/vendor/go.etcd.io/bbolt/node.go b/vendor/go.etcd.io/bbolt/node.go index 9c56150d88315..022b1001e279c 100644 --- a/vendor/go.etcd.io/bbolt/node.go +++ b/vendor/go.etcd.io/bbolt/node.go @@ -4,7 +4,8 @@ import ( "bytes" "fmt" "sort" - "unsafe" + + "go.etcd.io/bbolt/internal/common" ) // node represents an in-memory, deserialized page. @@ -14,10 +15,10 @@ type node struct { unbalanced bool spilled bool key []byte - pgid pgid + pgid common.Pgid parent *node children nodes - inodes inodes + inodes common.Inodes } // root returns the top-level node this node is attached to. @@ -38,10 +39,10 @@ func (n *node) minKeys() int { // size returns the size of the node after serialization. func (n *node) size() int { - sz, elsz := pageHeaderSize, n.pageElementSize() + sz, elsz := common.PageHeaderSize, n.pageElementSize() for i := 0; i < len(n.inodes); i++ { item := &n.inodes[i] - sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value)) + sz += elsz + uintptr(len(item.Key())) + uintptr(len(item.Value())) } return int(sz) } @@ -50,10 +51,10 @@ func (n *node) size() int { // This is an optimization to avoid calculating a large node when we only need // to know if it fits inside a certain page size. func (n *node) sizeLessThan(v uintptr) bool { - sz, elsz := pageHeaderSize, n.pageElementSize() + sz, elsz := common.PageHeaderSize, n.pageElementSize() for i := 0; i < len(n.inodes); i++ { item := &n.inodes[i] - sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value)) + sz += elsz + uintptr(len(item.Key())) + uintptr(len(item.Value())) if sz >= v { return false } @@ -64,9 +65,9 @@ func (n *node) sizeLessThan(v uintptr) bool { // pageElementSize returns the size of each page element based on the type of node. func (n *node) pageElementSize() uintptr { if n.isLeaf { - return leafPageElementSize + return common.LeafPageElementSize } - return branchPageElementSize + return common.BranchPageElementSize } // childAt returns the child node at a given index. @@ -74,12 +75,12 @@ func (n *node) childAt(index int) *node { if n.isLeaf { panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index)) } - return n.bucket.node(n.inodes[index].pgid, n) + return n.bucket.node(n.inodes[index].Pgid(), n) } // childIndex returns the index of a given child node. func (n *node) childIndex(child *node) int { - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].Key(), child.key) != -1 }) return index } @@ -113,9 +114,9 @@ func (n *node) prevSibling() *node { } // put inserts a key/value. 
-func (n *node) put(oldKey, newKey, value []byte, pgId pgid, flags uint32) { - if pgId >= n.bucket.tx.meta.pgid { - panic(fmt.Sprintf("pgId (%d) above high water mark (%d)", pgId, n.bucket.tx.meta.pgid)) +func (n *node) put(oldKey, newKey, value []byte, pgId common.Pgid, flags uint32) { + if pgId >= n.bucket.tx.meta.Pgid() { + panic(fmt.Sprintf("pgId (%d) above high water mark (%d)", pgId, n.bucket.tx.meta.Pgid())) } else if len(oldKey) <= 0 { panic("put: zero-length old key") } else if len(newKey) <= 0 { @@ -123,30 +124,30 @@ func (n *node) put(oldKey, newKey, value []byte, pgId pgid, flags uint32) { } // Find insertion index. - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].Key(), oldKey) != -1 }) // Add capacity and shift nodes if we don't have an exact match and need to insert. - exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) + exact := len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].Key(), oldKey) if !exact { - n.inodes = append(n.inodes, inode{}) + n.inodes = append(n.inodes, common.Inode{}) copy(n.inodes[index+1:], n.inodes[index:]) } inode := &n.inodes[index] - inode.flags = flags - inode.key = newKey - inode.value = value - inode.pgid = pgId - _assert(len(inode.key) > 0, "put: zero-length inode key") + inode.SetFlags(flags) + inode.SetKey(newKey) + inode.SetValue(value) + inode.SetPgid(pgId) + common.Assert(len(inode.Key()) > 0, "put: zero-length inode key") } // del removes a key from the node. func (n *node) del(key []byte) { // Find index of key. - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) + index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].Key(), key) != -1 }) // Exit if the key isn't found. - if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { + if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].Key(), key) { return } @@ -158,30 +159,15 @@ func (n *node) del(key []byte) { } // read initializes the node from a page. -func (n *node) read(p *page) { - n.pgid = p.id - n.isLeaf = ((p.flags & leafPageFlag) != 0) - n.inodes = make(inodes, int(p.count)) - - for i := 0; i < int(p.count); i++ { - inode := &n.inodes[i] - if n.isLeaf { - elem := p.leafPageElement(uint16(i)) - inode.flags = elem.flags - inode.key = elem.key() - inode.value = elem.value() - } else { - elem := p.branchPageElement(uint16(i)) - inode.pgid = elem.pgid - inode.key = elem.key() - } - _assert(len(inode.key) > 0, "read: zero-length inode key") - } +func (n *node) read(p *common.Page) { + n.pgid = p.Id() + n.isLeaf = p.IsLeafPage() + n.inodes = common.ReadInodeFromPage(p) - // Save first key so we can find the node in the parent when we spill. + // Save first key, so we can find the node in the parent when we spill. if len(n.inodes) > 0 { - n.key = n.inodes[0].key - _assert(len(n.key) > 0, "read: zero-length node key") + n.key = n.inodes[0].Key() + common.Assert(len(n.key) > 0, "read: zero-length node key") } else { n.key = nil } @@ -190,57 +176,27 @@ func (n *node) read(p *page) { // write writes the items onto one or more pages. // The page should have p.id (might be 0 for meta or bucket-inline page) and p.overflow set // and the rest should be zeroed. 
-func (n *node) write(p *page) { - _assert(p.count == 0 && p.flags == 0, "node cannot be written into a not empty page") +func (n *node) write(p *common.Page) { + common.Assert(p.Count() == 0 && p.Flags() == 0, "node cannot be written into a not empty page") // Initialize page. if n.isLeaf { - p.flags = leafPageFlag + p.SetFlags(common.LeafPageFlag) } else { - p.flags = branchPageFlag + p.SetFlags(common.BranchPageFlag) } if len(n.inodes) >= 0xFFFF { - panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id)) + panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.Id())) } - p.count = uint16(len(n.inodes)) + p.SetCount(uint16(len(n.inodes))) // Stop here if there are no items to write. - if p.count == 0 { + if p.Count() == 0 { return } - // Loop over each item and write it to the page. - // off tracks the offset into p of the start of the next data. - off := unsafe.Sizeof(*p) + n.pageElementSize()*uintptr(len(n.inodes)) - for i, item := range n.inodes { - _assert(len(item.key) > 0, "write: zero-length inode key") - - // Create a slice to write into of needed size and advance - // byte pointer for next iteration. - sz := len(item.key) + len(item.value) - b := unsafeByteSlice(unsafe.Pointer(p), off, 0, sz) - off += uintptr(sz) - - // Write the page element. - if n.isLeaf { - elem := p.leafPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) - elem.flags = item.flags - elem.ksize = uint32(len(item.key)) - elem.vsize = uint32(len(item.value)) - } else { - elem := p.branchPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) - elem.ksize = uint32(len(item.key)) - elem.pgid = item.pgid - _assert(elem.pgid != p.id, "write: circular dependency occurred") - } - - // Write data for the element to the end of the page. - l := copy(b, item.key) - copy(b[l:], item.value) - } + common.WriteInodeToPage(n.inodes, p) // DEBUG ONLY: n.dump() } @@ -273,7 +229,7 @@ func (n *node) split(pageSize uintptr) []*node { func (n *node) splitTwo(pageSize uintptr) (*node, *node) { // Ignore the split if the page doesn't have at least enough nodes for // two pages or if the nodes can fit in a single page. - if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) { + if len(n.inodes) <= (common.MinKeysPerPage*2) || n.sizeLessThan(pageSize) { return n, nil } @@ -313,17 +269,17 @@ func (n *node) splitTwo(pageSize uintptr) (*node, *node) { // It returns the index as well as the size of the first page. // This is only be called from split(). func (n *node) splitIndex(threshold int) (index, sz uintptr) { - sz = pageHeaderSize + sz = common.PageHeaderSize // Loop until we only have the minimum number of keys required for the second page. - for i := 0; i < len(n.inodes)-minKeysPerPage; i++ { + for i := 0; i < len(n.inodes)-common.MinKeysPerPage; i++ { index = uintptr(i) inode := n.inodes[i] - elsize := n.pageElementSize() + uintptr(len(inode.key)) + uintptr(len(inode.value)) + elsize := n.pageElementSize() + uintptr(len(inode.Key())) + uintptr(len(inode.Value())) // If we have at least the minimum number of keys and adding another // node would put us over the threshold then exit and return. - if index >= minKeysPerPage && sz+elsize > uintptr(threshold) { + if index >= common.MinKeysPerPage && sz+elsize > uintptr(threshold) { break } @@ -360,7 +316,7 @@ func (n *node) spill() error { for _, node := range nodes { // Add node's page to the freelist if it's not new. 
if node.pgid > 0 { - tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid)) + tx.db.freelist.Free(tx.meta.Txid(), tx.page(node.pgid)) node.pgid = 0 } @@ -371,10 +327,10 @@ func (n *node) spill() error { } // Write the node. - if p.id >= tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)) + if p.Id() >= tx.meta.Pgid() { + panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.Id(), tx.meta.Pgid())) } - node.pgid = p.id + node.pgid = p.Id() node.write(p) node.spilled = true @@ -382,12 +338,12 @@ func (n *node) spill() error { if node.parent != nil { var key = node.key if key == nil { - key = node.inodes[0].key + key = node.inodes[0].Key() } - node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0) - node.key = node.inodes[0].key - _assert(len(node.key) > 0, "spill: zero-length node key") + node.parent.put(key, node.inodes[0].Key(), nil, node.pgid, 0) + node.key = node.inodes[0].Key() + common.Assert(len(node.key) > 0, "spill: zero-length node key") } // Update the statistics. @@ -415,8 +371,8 @@ func (n *node) rebalance() { // Update statistics. n.bucket.tx.stats.IncRebalance(1) - // Ignore if node is above threshold (25%) and has enough keys. - var threshold = n.bucket.tx.db.pageSize / 4 + // Ignore if node is above threshold (25% when FillPercent is set to DefaultFillPercent) and has enough keys. + var threshold = int(float64(n.bucket.tx.db.pageSize)*n.bucket.FillPercent) / 2 if n.size() > threshold && len(n.inodes) > n.minKeys() { return } @@ -426,14 +382,14 @@ func (n *node) rebalance() { // If root node is a branch and only has one node then collapse it. if !n.isLeaf && len(n.inodes) == 1 { // Move root's child up. - child := n.bucket.node(n.inodes[0].pgid, n) + child := n.bucket.node(n.inodes[0].Pgid(), n) n.isLeaf = child.isLeaf n.inodes = child.inodes[:] n.children = child.children // Reparent all child nodes being moved. for _, inode := range n.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { + if child, ok := n.bucket.nodes[inode.Pgid()]; ok { child.parent = n } } @@ -457,53 +413,37 @@ func (n *node) rebalance() { return } - _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") + common.Assert(n.parent.numChildren() > 1, "parent must have at least 2 children") - // Destination node is right sibling if idx == 0, otherwise left sibling. - var target *node - var useNextSibling = (n.parent.childIndex(n) == 0) + // Merge with right sibling if idx == 0, otherwise left sibling. + var leftNode, rightNode *node + var useNextSibling = n.parent.childIndex(n) == 0 if useNextSibling { - target = n.nextSibling() + leftNode = n + rightNode = n.nextSibling() } else { - target = n.prevSibling() + leftNode = n.prevSibling() + rightNode = n } - // If both this node and the target node are too small then merge them. - if useNextSibling { - // Reparent all child nodes being moved. - for _, inode := range target.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent.removeChild(child) - child.parent = n - child.parent.children = append(child.parent.children, child) - } + // If both nodes are too small then merge them. + // Reparent all child nodes being moved. + for _, inode := range rightNode.inodes { + if child, ok := n.bucket.nodes[inode.Pgid()]; ok { + child.parent.removeChild(child) + child.parent = leftNode + child.parent.children = append(child.parent.children, child) } - - // Copy over inodes from target and remove target. - n.inodes = append(n.inodes, target.inodes...) 
- n.parent.del(target.key) - n.parent.removeChild(target) - delete(n.bucket.nodes, target.pgid) - target.free() - } else { - // Reparent all child nodes being moved. - for _, inode := range n.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent.removeChild(child) - child.parent = target - child.parent.children = append(child.parent.children, child) - } - } - - // Copy over inodes to target and remove node. - target.inodes = append(target.inodes, n.inodes...) - n.parent.del(n.key) - n.parent.removeChild(n) - delete(n.bucket.nodes, n.pgid) - n.free() } - // Either this node or the target node was deleted from the parent so rebalance it. + // Copy over inodes from right node to left node and remove right node. + leftNode.inodes = append(leftNode.inodes, rightNode.inodes...) + n.parent.del(rightNode.key) + n.parent.removeChild(rightNode) + delete(n.bucket.nodes, rightNode.pgid) + rightNode.free() + + // Either this node or the sibling node was deleted from the parent so rebalance it. n.parent.rebalance() } @@ -525,20 +465,20 @@ func (n *node) dereference() { key := make([]byte, len(n.key)) copy(key, n.key) n.key = key - _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") + common.Assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") } for i := range n.inodes { inode := &n.inodes[i] - key := make([]byte, len(inode.key)) - copy(key, inode.key) - inode.key = key - _assert(len(inode.key) > 0, "dereference: zero-length inode key") + key := make([]byte, len(inode.Key())) + copy(key, inode.Key()) + inode.SetKey(key) + common.Assert(len(inode.Key()) > 0, "dereference: zero-length inode key") - value := make([]byte, len(inode.value)) - copy(value, inode.value) - inode.value = value + value := make([]byte, len(inode.Value())) + copy(value, inode.Value()) + inode.SetValue(value) } // Recursively dereference children. @@ -553,7 +493,7 @@ func (n *node) dereference() { // free adds the node's underlying page to the freelist. func (n *node) free() { if n.pgid != 0 { - n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) + n.bucket.tx.db.freelist.Free(n.bucket.tx.meta.Txid(), n.bucket.tx.page(n.pgid)) n.pgid = 0 } } @@ -594,17 +534,5 @@ type nodes []*node func (s nodes) Len() int { return len(s) } func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s nodes) Less(i, j int) bool { - return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 + return bytes.Compare(s[i].inodes[0].Key(), s[j].inodes[0].Key()) == -1 } - -// inode represents an internal node inside of a node. -// It can be used to point to elements in a page or point -// to an element which hasn't been added to a page yet. 
-type inode struct { - flags uint32 - pgid pgid - key []byte - value []byte -} - -type inodes []inode diff --git a/vendor/go.etcd.io/bbolt/page.go b/vendor/go.etcd.io/bbolt/page.go deleted file mode 100644 index bb081b031e659..0000000000000 --- a/vendor/go.etcd.io/bbolt/page.go +++ /dev/null @@ -1,212 +0,0 @@ -package bbolt - -import ( - "fmt" - "os" - "sort" - "unsafe" -) - -const pageHeaderSize = unsafe.Sizeof(page{}) - -const minKeysPerPage = 2 - -const branchPageElementSize = unsafe.Sizeof(branchPageElement{}) -const leafPageElementSize = unsafe.Sizeof(leafPageElement{}) - -const ( - branchPageFlag = 0x01 - leafPageFlag = 0x02 - metaPageFlag = 0x04 - freelistPageFlag = 0x10 -) - -const ( - bucketLeafFlag = 0x01 -) - -type pgid uint64 - -type page struct { - id pgid - flags uint16 - count uint16 - overflow uint32 -} - -// typ returns a human readable page type string used for debugging. -func (p *page) typ() string { - if (p.flags & branchPageFlag) != 0 { - return "branch" - } else if (p.flags & leafPageFlag) != 0 { - return "leaf" - } else if (p.flags & metaPageFlag) != 0 { - return "meta" - } else if (p.flags & freelistPageFlag) != 0 { - return "freelist" - } - return fmt.Sprintf("unknown<%02x>", p.flags) -} - -// meta returns a pointer to the metadata section of the page. -func (p *page) meta() *meta { - return (*meta)(unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))) -} - -func (p *page) fastCheck(id pgid) { - _assert(p.id == id, "Page expected to be: %v, but self identifies as %v", id, p.id) - // Only one flag of page-type can be set. - _assert(p.flags == branchPageFlag || - p.flags == leafPageFlag || - p.flags == metaPageFlag || - p.flags == freelistPageFlag, - "page %v: has unexpected type/flags: %x", p.id, p.flags) -} - -// leafPageElement retrieves the leaf node by index -func (p *page) leafPageElement(index uint16) *leafPageElement { - return (*leafPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), - leafPageElementSize, int(index))) -} - -// leafPageElements retrieves a list of leaf nodes. -func (p *page) leafPageElements() []leafPageElement { - if p.count == 0 { - return nil - } - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - elems := unsafe.Slice((*leafPageElement)(data), int(p.count)) - return elems -} - -// branchPageElement retrieves the branch node by index -func (p *page) branchPageElement(index uint16) *branchPageElement { - return (*branchPageElement)(unsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), - unsafe.Sizeof(branchPageElement{}), int(index))) -} - -// branchPageElements retrieves a list of branch nodes. -func (p *page) branchPageElements() []branchPageElement { - if p.count == 0 { - return nil - } - data := unsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)) - elems := unsafe.Slice((*branchPageElement)(data), int(p.count)) - return elems -} - -// dump writes n bytes of the page to STDERR as hex output. -func (p *page) hexdump(n int) { - buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, n) - fmt.Fprintf(os.Stderr, "%x\n", buf) -} - -type pages []*page - -func (s pages) Len() int { return len(s) } -func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s pages) Less(i, j int) bool { return s[i].id < s[j].id } - -// branchPageElement represents a node on a branch page. -type branchPageElement struct { - pos uint32 - ksize uint32 - pgid pgid -} - -// key returns a byte slice of the node key. 
-func (n *branchPageElement) key() []byte { - return unsafeByteSlice(unsafe.Pointer(n), 0, int(n.pos), int(n.pos)+int(n.ksize)) -} - -// leafPageElement represents a node on a leaf page. -type leafPageElement struct { - flags uint32 - pos uint32 - ksize uint32 - vsize uint32 -} - -// key returns a byte slice of the node key. -func (n *leafPageElement) key() []byte { - i := int(n.pos) - j := i + int(n.ksize) - return unsafeByteSlice(unsafe.Pointer(n), 0, i, j) -} - -// value returns a byte slice of the node value. -func (n *leafPageElement) value() []byte { - i := int(n.pos) + int(n.ksize) - j := i + int(n.vsize) - return unsafeByteSlice(unsafe.Pointer(n), 0, i, j) -} - -// PageInfo represents human readable information about a page. -type PageInfo struct { - ID int - Type string - Count int - OverflowCount int -} - -type pgids []pgid - -func (s pgids) Len() int { return len(s) } -func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s pgids) Less(i, j int) bool { return s[i] < s[j] } - -// merge returns the sorted union of a and b. -func (a pgids) merge(b pgids) pgids { - // Return the opposite slice if one is nil. - if len(a) == 0 { - return b - } - if len(b) == 0 { - return a - } - merged := make(pgids, len(a)+len(b)) - mergepgids(merged, a, b) - return merged -} - -// mergepgids copies the sorted union of a and b into dst. -// If dst is too small, it panics. -func mergepgids(dst, a, b pgids) { - if len(dst) < len(a)+len(b) { - panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b))) - } - // Copy in the opposite slice if one is nil. - if len(a) == 0 { - copy(dst, b) - return - } - if len(b) == 0 { - copy(dst, a) - return - } - - // Merged will hold all elements from both lists. - merged := dst[:0] - - // Assign lead to the slice with a lower starting value, follow to the higher value. - lead, follow := a, b - if b[0] < a[0] { - lead, follow = b, a - } - - // Continue while there are elements in the lead. - for len(lead) > 0 { - // Merge largest prefix of lead that is ahead of follow[0]. - n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) - merged = append(merged, lead[:n]...) - if n >= len(lead) { - break - } - - // Swap lead and follow. - lead, follow = follow, lead[n:] - } - - // Append what's left in follow. - _ = append(merged, follow...) -} diff --git a/vendor/go.etcd.io/bbolt/tx.go b/vendor/go.etcd.io/bbolt/tx.go index 766395de3be7d..7b5db77278629 100644 --- a/vendor/go.etcd.io/bbolt/tx.go +++ b/vendor/go.etcd.io/bbolt/tx.go @@ -5,15 +5,16 @@ import ( "fmt" "io" "os" + "runtime" "sort" "strings" "sync/atomic" "time" "unsafe" -) -// txid represents the internal transaction identifier. -type txid uint64 + berrors "go.etcd.io/bbolt/errors" + "go.etcd.io/bbolt/internal/common" +) // Tx represents a read-only or read/write transaction on the database. // Read-only transactions can be used for retrieving values for keys and creating cursors. @@ -27,9 +28,9 @@ type Tx struct { writable bool managed bool db *DB - meta *meta + meta *common.Meta root Bucket - pages map[pgid]*page + pages map[common.Pgid]*common.Page stats TxStats commitHandlers []func() @@ -48,24 +49,27 @@ func (tx *Tx) init(db *DB) { tx.pages = nil // Copy the meta page since it can be changed by the writer. - tx.meta = &meta{} - db.meta().copy(tx.meta) + tx.meta = &common.Meta{} + db.meta().Copy(tx.meta) // Copy over the root bucket. 
tx.root = newBucket(tx) - tx.root.bucket = &bucket{} - *tx.root.bucket = tx.meta.root + tx.root.InBucket = &common.InBucket{} + *tx.root.InBucket = *(tx.meta.RootBucket()) // Increment the transaction id and add a page cache for writable transactions. if tx.writable { - tx.pages = make(map[pgid]*page) - tx.meta.txid += txid(1) + tx.pages = make(map[common.Pgid]*common.Page) + tx.meta.IncTxid() } } // ID returns the transaction id. func (tx *Tx) ID() int { - return int(tx.meta.txid) + if tx == nil || tx.meta == nil { + return -1 + } + return int(tx.meta.Txid()) } // DB returns a reference to the database that created the transaction. @@ -75,7 +79,7 @@ func (tx *Tx) DB() *DB { // Size returns current database size in bytes as seen by this transaction. func (tx *Tx) Size() int64 { - return int64(tx.meta.pgid) * int64(tx.db.pageSize) + return int64(tx.meta.Pgid()) * int64(tx.db.pageSize) } // Writable returns whether the transaction can perform write operations. @@ -96,6 +100,11 @@ func (tx *Tx) Stats() TxStats { return tx.stats } +// Inspect returns the structure of the database. +func (tx *Tx) Inspect() BucketStructure { + return tx.root.Inspect() +} + // Bucket retrieves a bucket by name. // Returns nil if the bucket does not exist. // The bucket instance is only valid for the lifetime of the transaction. @@ -123,6 +132,24 @@ func (tx *Tx) DeleteBucket(name []byte) error { return tx.root.DeleteBucket(name) } +// MoveBucket moves a sub-bucket from the source bucket to the destination bucket. +// Returns an error if +// 1. the sub-bucket cannot be found in the source bucket; +// 2. or the key already exists in the destination bucket; +// 3. the key represents a non-bucket value. +// +// If src is nil, it means moving a top level bucket into the target bucket. +// If dst is nil, it means converting the child bucket into a top level bucket. +func (tx *Tx) MoveBucket(child []byte, src *Bucket, dst *Bucket) error { + if src == nil { + src = &tx.root + } + if dst == nil { + dst = &tx.root + } + return src.MoveBucket(child, dst) +} + // ForEach executes a function for each bucket in the root. // If the provided function returns an error then the iteration is stopped and // the error is returned to the caller. @@ -137,15 +164,28 @@ func (tx *Tx) OnCommit(fn func()) { tx.commitHandlers = append(tx.commitHandlers, fn) } -// Commit writes all changes to disk and updates the meta page. +// Commit writes all changes to disk, updates the meta page and closes the transaction. // Returns an error if a disk write error occurs, or if Commit is // called on a read-only transaction. -func (tx *Tx) Commit() error { - _assert(!tx.managed, "managed tx commit not allowed") +func (tx *Tx) Commit() (err error) { + txId := tx.ID() + lg := tx.db.Logger() + if lg != discardLogger { + lg.Debugf("Committing transaction %d", txId) + defer func() { + if err != nil { + lg.Errorf("Committing transaction failed: %v", err) + } else { + lg.Debugf("Committing transaction %d successfully", txId) + } + }() + } + + common.Assert(!tx.managed, "managed tx commit not allowed") if tx.db == nil { - return ErrTxClosed + return berrors.ErrTxClosed } else if !tx.writable { - return ErrTxNotWritable + return berrors.ErrTxNotWritable } // TODO(benbjohnson): Use vectorized I/O to write out dirty pages. @@ -157,40 +197,43 @@ func (tx *Tx) Commit() error { tx.stats.IncRebalanceTime(time.Since(startTime)) } - opgid := tx.meta.pgid + opgid := tx.meta.Pgid() // spill data onto dirty pages. 
startTime = time.Now() - if err := tx.root.spill(); err != nil { + if err = tx.root.spill(); err != nil { + lg.Errorf("spilling data onto dirty pages failed: %v", err) tx.rollback() return err } tx.stats.IncSpillTime(time.Since(startTime)) // Free the old root bucket. - tx.meta.root.root = tx.root.root + tx.meta.RootBucket().SetRootPage(tx.root.RootPage()) // Free the old freelist because commit writes out a fresh freelist. - if tx.meta.freelist != pgidNoFreelist { - tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) + if tx.meta.Freelist() != common.PgidNoFreelist { + tx.db.freelist.Free(tx.meta.Txid(), tx.db.page(tx.meta.Freelist())) } if !tx.db.NoFreelistSync { - err := tx.commitFreelist() + err = tx.commitFreelist() if err != nil { + lg.Errorf("committing freelist failed: %v", err) return err } } else { - tx.meta.freelist = pgidNoFreelist + tx.meta.SetFreelist(common.PgidNoFreelist) } // If the high water mark has moved up then attempt to grow the database. - if tx.meta.pgid > opgid { + if tx.meta.Pgid() > opgid { _ = errors.New("") // gofail: var lackOfDiskSpace string // tx.rollback() // return errors.New(lackOfDiskSpace) - if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { + if err = tx.db.grow(int(tx.meta.Pgid()+1) * tx.db.pageSize); err != nil { + lg.Errorf("growing db size failed, pgid: %d, pagesize: %d, error: %v", tx.meta.Pgid(), tx.db.pageSize, err) tx.rollback() return err } @@ -198,7 +241,8 @@ func (tx *Tx) Commit() error { // Write dirty pages to disk. startTime = time.Now() - if err := tx.write(); err != nil { + if err = tx.write(); err != nil { + lg.Errorf("writing data failed: %v", err) tx.rollback() return err } @@ -208,11 +252,11 @@ func (tx *Tx) Commit() error { ch := tx.Check() var errs []string for { - err, ok := <-ch + chkErr, ok := <-ch if !ok { break } - errs = append(errs, err.Error()) + errs = append(errs, chkErr.Error()) } if len(errs) > 0 { panic("check fail: " + strings.Join(errs, "\n")) @@ -220,7 +264,8 @@ func (tx *Tx) Commit() error { } // Write meta to disk. - if err := tx.writeMeta(); err != nil { + if err = tx.writeMeta(); err != nil { + lg.Errorf("writeMeta failed: %v", err) tx.rollback() return err } @@ -240,16 +285,14 @@ func (tx *Tx) Commit() error { func (tx *Tx) commitFreelist() error { // Allocate new pages for the new free list. This will overestimate // the size of the freelist but not underestimate the size (which would be bad). - p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) + p, err := tx.allocate((tx.db.freelist.EstimatedWritePageSize() / tx.db.pageSize) + 1) if err != nil { tx.rollback() return err } - if err := tx.db.freelist.write(p); err != nil { - tx.rollback() - return err - } - tx.meta.freelist = p.id + + tx.db.freelist.Write(p) + tx.meta.SetFreelist(p.Id()) return nil } @@ -257,9 +300,9 @@ func (tx *Tx) commitFreelist() error { // Rollback closes the transaction and ignores all previous updates. Read-only // transactions must be rolled back and not committed. 
func (tx *Tx) Rollback() error { - _assert(!tx.managed, "managed tx rollback not allowed") + common.Assert(!tx.managed, "managed tx rollback not allowed") if tx.db == nil { - return ErrTxClosed + return berrors.ErrTxClosed } tx.nonPhysicalRollback() return nil @@ -271,7 +314,7 @@ func (tx *Tx) nonPhysicalRollback() { return } if tx.writable { - tx.db.freelist.rollback(tx.meta.txid) + tx.db.freelist.Rollback(tx.meta.Txid()) } tx.close() } @@ -282,17 +325,17 @@ func (tx *Tx) rollback() { return } if tx.writable { - tx.db.freelist.rollback(tx.meta.txid) + tx.db.freelist.Rollback(tx.meta.Txid()) // When mmap fails, the `data`, `dataref` and `datasz` may be reset to // zero values, and there is no way to reload free page IDs in this case. if tx.db.data != nil { if !tx.db.hasSyncedFreelist() { // Reconstruct free page list by scanning the DB to get the whole free page list. - // Note: scaning the whole db is heavy if your db size is large in NoSyncFreeList mode. - tx.db.freelist.noSyncReload(tx.db.freepages()) + // Note: scanning the whole db is heavy if your db size is large in NoSyncFreeList mode. + tx.db.freelist.NoSyncReload(tx.db.freepages()) } else { // Read free page list from freelist page. - tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) + tx.db.freelist.Reload(tx.db.page(tx.db.meta().Freelist())) } } } @@ -305,9 +348,9 @@ func (tx *Tx) close() { } if tx.writable { // Grab freelist stats. - var freelistFreeN = tx.db.freelist.free_count() - var freelistPendingN = tx.db.freelist.pending_count() - var freelistAlloc = tx.db.freelist.size() + var freelistFreeN = tx.db.freelist.FreeCount() + var freelistPendingN = tx.db.freelist.PendingCount() + var freelistAlloc = tx.db.freelist.EstimatedWritePageSize() // Remove transaction ref & writer lock. tx.db.rwtx = nil @@ -335,7 +378,7 @@ func (tx *Tx) close() { // Copy writes the entire database to a writer. // This function exists for backwards compatibility. // -// Deprecated; Use WriteTo() instead. +// Deprecated: Use WriteTo() instead. func (tx *Tx) Copy(w io.Writer) error { _, err := tx.WriteTo(w) return err @@ -357,13 +400,13 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { // Generate a meta page. We use the same page data for both meta pages. buf := make([]byte, tx.db.pageSize) - page := (*page)(unsafe.Pointer(&buf[0])) - page.flags = metaPageFlag - *page.meta() = *tx.meta + page := (*common.Page)(unsafe.Pointer(&buf[0])) + page.SetFlags(common.MetaPageFlag) + *page.Meta() = *tx.meta // Write meta 0. - page.id = 0 - page.meta().checksum = page.meta().sum64() + page.SetId(0) + page.Meta().SetChecksum(page.Meta().Sum64()) nn, err := w.Write(buf) n += int64(nn) if err != nil { @@ -371,9 +414,9 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { } // Write meta 1 with a lower transaction id. - page.id = 1 - page.meta().txid -= 1 - page.meta().checksum = page.meta().sum64() + page.SetId(1) + page.Meta().DecTxid() + page.Meta().SetChecksum(page.Meta().Sum64()) nn, err = w.Write(buf) n += int64(nn) if err != nil { @@ -413,14 +456,16 @@ func (tx *Tx) CopyFile(path string, mode os.FileMode) error { } // allocate returns a contiguous block of memory starting at a given page. 
-func (tx *Tx) allocate(count int) (*page, error) { - p, err := tx.db.allocate(tx.meta.txid, count) +func (tx *Tx) allocate(count int) (*common.Page, error) { + lg := tx.db.Logger() + p, err := tx.db.allocate(tx.meta.Txid(), count) if err != nil { + lg.Errorf("allocating failed, txid: %d, count: %d, error: %v", tx.meta.Txid(), count, err) return nil, err } // Save to our page cache. - tx.pages[p.id] = p + tx.pages[p.Id()] = p // Update statistics. tx.stats.IncPageCount(int64(count)) @@ -432,18 +477,19 @@ func (tx *Tx) allocate(count int) (*page, error) { // write writes any dirty pages to disk. func (tx *Tx) write() error { // Sort pages by id. - pages := make(pages, 0, len(tx.pages)) + lg := tx.db.Logger() + pages := make(common.Pages, 0, len(tx.pages)) for _, p := range tx.pages { pages = append(pages, p) } // Clear out page cache early. - tx.pages = make(map[pgid]*page) + tx.pages = make(map[common.Pgid]*common.Page) sort.Sort(pages) // Write pages to disk in order. for _, p := range pages { - rem := (uint64(p.overflow) + 1) * uint64(tx.db.pageSize) - offset := int64(p.id) * int64(tx.db.pageSize) + rem := (uint64(p.Overflow()) + 1) * uint64(tx.db.pageSize) + offset := int64(p.Id()) * int64(tx.db.pageSize) var written uintptr // Write out page in "max allocation" sized chunks. @@ -452,9 +498,10 @@ func (tx *Tx) write() error { if sz > maxAllocSize-1 { sz = maxAllocSize - 1 } - buf := unsafeByteSlice(unsafe.Pointer(p), written, 0, int(sz)) + buf := common.UnsafeByteSlice(unsafe.Pointer(p), written, 0, int(sz)) if _, err := tx.db.ops.writeAt(buf, offset); err != nil { + lg.Errorf("writeAt failed, offset: %d: %w", offset, err) return err } @@ -474,9 +521,10 @@ func (tx *Tx) write() error { } // Ignore file sync if flag is set on DB. - if !tx.db.NoSync || IgnoreNoSync { + if !tx.db.NoSync || common.IgnoreNoSync { // gofail: var beforeSyncDataPages struct{} if err := fdatasync(tx.db); err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] fdatasync failed: %w", runtime.GOOS, runtime.GOARCH, err) return err } } @@ -485,11 +533,11 @@ func (tx *Tx) write() error { for _, p := range pages { // Ignore page sizes over 1 page. // These are allocated using make() instead of the page pool. - if int(p.overflow) != 0 { + if int(p.Overflow()) != 0 { continue } - buf := unsafeByteSlice(unsafe.Pointer(p), 0, 0, tx.db.pageSize) + buf := common.UnsafeByteSlice(unsafe.Pointer(p), 0, 0, tx.db.pageSize) // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1 for i := range buf { @@ -503,18 +551,24 @@ func (tx *Tx) write() error { // writeMeta writes the meta to the disk. func (tx *Tx) writeMeta() error { + // gofail: var beforeWriteMetaError string + // return errors.New(beforeWriteMetaError) + // Create a temporary buffer for the meta page. + lg := tx.db.Logger() buf := make([]byte, tx.db.pageSize) p := tx.db.pageInBuffer(buf, 0) - tx.meta.write(p) + tx.meta.Write(p) // Write the meta page to file. 
- if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil { + if _, err := tx.db.ops.writeAt(buf, int64(p.Id())*int64(tx.db.pageSize)); err != nil { + lg.Errorf("writeAt failed, pgid: %d, pageSize: %d, error: %v", p.Id(), tx.db.pageSize, err) return err } - if !tx.db.NoSync || IgnoreNoSync { + if !tx.db.NoSync || common.IgnoreNoSync { // gofail: var beforeSyncMetaPage struct{} if err := fdatasync(tx.db); err != nil { + lg.Errorf("[GOOS: %s, GOARCH: %s] fdatasync failed: %w", runtime.GOOS, runtime.GOARCH, err) return err } } @@ -527,69 +581,69 @@ func (tx *Tx) writeMeta() error { // page returns a reference to the page with a given id. // If page has been written to then a temporary buffered page is returned. -func (tx *Tx) page(id pgid) *page { +func (tx *Tx) page(id common.Pgid) *common.Page { // Check the dirty pages first. if tx.pages != nil { if p, ok := tx.pages[id]; ok { - p.fastCheck(id) + p.FastCheck(id) return p } } // Otherwise return directly from the mmap. p := tx.db.page(id) - p.fastCheck(id) + p.FastCheck(id) return p } // forEachPage iterates over every page within a given page and executes a function. -func (tx *Tx) forEachPage(pgidnum pgid, fn func(*page, int, []pgid)) { - stack := make([]pgid, 10) +func (tx *Tx) forEachPage(pgidnum common.Pgid, fn func(*common.Page, int, []common.Pgid)) { + stack := make([]common.Pgid, 10) stack[0] = pgidnum tx.forEachPageInternal(stack[:1], fn) } -func (tx *Tx) forEachPageInternal(pgidstack []pgid, fn func(*page, int, []pgid)) { +func (tx *Tx) forEachPageInternal(pgidstack []common.Pgid, fn func(*common.Page, int, []common.Pgid)) { p := tx.page(pgidstack[len(pgidstack)-1]) // Execute function. fn(p, len(pgidstack)-1, pgidstack) // Recursively loop over children. - if (p.flags & branchPageFlag) != 0 { - for i := 0; i < int(p.count); i++ { - elem := p.branchPageElement(uint16(i)) - tx.forEachPageInternal(append(pgidstack, elem.pgid), fn) + if p.IsBranchPage() { + for i := 0; i < int(p.Count()); i++ { + elem := p.BranchPageElement(uint16(i)) + tx.forEachPageInternal(append(pgidstack, elem.Pgid()), fn) } } } // Page returns page information for a given page number. // This is only safe for concurrent use when used by a writable transaction. -func (tx *Tx) Page(id int) (*PageInfo, error) { +func (tx *Tx) Page(id int) (*common.PageInfo, error) { if tx.db == nil { - return nil, ErrTxClosed - } else if pgid(id) >= tx.meta.pgid { + return nil, berrors.ErrTxClosed + } else if common.Pgid(id) >= tx.meta.Pgid() { return nil, nil } if tx.db.freelist == nil { - return nil, ErrFreePagesNotLoaded + return nil, berrors.ErrFreePagesNotLoaded } // Build the page info. - p := tx.db.page(pgid(id)) - info := &PageInfo{ + p := tx.db.page(common.Pgid(id)) + info := &common.PageInfo{ ID: id, - Count: int(p.count), - OverflowCount: int(p.overflow), + Count: int(p.Count()), + OverflowCount: int(p.Overflow()), } // Determine the type (or if it's free). - if tx.db.freelist.freed(pgid(id)) { + if tx.db.freelist.Freed(common.Pgid(id)) { info.Type = "free" } else { - info.Type = p.typ() + info.Type = p.Typ() } return info, nil diff --git a/vendor/go.etcd.io/bbolt/tx_check.go b/vendor/go.etcd.io/bbolt/tx_check.go index 75c7c08436dc2..c3ecbb975073a 100644 --- a/vendor/go.etcd.io/bbolt/tx_check.go +++ b/vendor/go.etcd.io/bbolt/tx_check.go @@ -3,6 +3,8 @@ package bbolt import ( "encoding/hex" "fmt" + + "go.etcd.io/bbolt/internal/common" ) // Check performs several consistency checks on the database for this transaction. 
@@ -13,13 +15,10 @@ import ( // because of caching. This overhead can be removed if running on a read-only // transaction, however, it is not safe to execute other writer transactions at // the same time. -func (tx *Tx) Check() <-chan error { - return tx.CheckWithOptions() -} - -// CheckWithOptions allows users to provide a customized `KVStringer` implementation, +// +// It also allows users to provide a customized `KVStringer` implementation, // so that bolt can generate human-readable diagnostic messages. -func (tx *Tx) CheckWithOptions(options ...CheckOption) <-chan error { +func (tx *Tx) Check(options ...CheckOption) <-chan error { chkConfig := checkConfig{ kvStringer: HexKVStringer(), } @@ -28,18 +27,22 @@ func (tx *Tx) CheckWithOptions(options ...CheckOption) <-chan error { } ch := make(chan error) - go tx.check(chkConfig.kvStringer, ch) + go func() { + // Close the channel to signal completion. + defer close(ch) + tx.check(chkConfig, ch) + }() return ch } -func (tx *Tx) check(kvStringer KVStringer, ch chan error) { +func (tx *Tx) check(cfg checkConfig, ch chan error) { // Force loading free list if opened in ReadOnly mode. tx.db.loadFreelist() // Check if any pages are double freed. - freed := make(map[pgid]bool) - all := make([]pgid, tx.db.freelist.count()) - tx.db.freelist.copyall(all) + freed := make(map[common.Pgid]bool) + all := make([]common.Pgid, tx.db.freelist.Count()) + tx.db.freelist.Copyall(all) for _, id := range all { if freed[id] { ch <- fmt.Errorf("page %d: already freed", id) @@ -48,118 +51,171 @@ func (tx *Tx) check(kvStringer KVStringer, ch chan error) { } // Track every reachable page. - reachable := make(map[pgid]*page) + reachable := make(map[common.Pgid]*common.Page) reachable[0] = tx.page(0) // meta0 reachable[1] = tx.page(1) // meta1 - if tx.meta.freelist != pgidNoFreelist { - for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { - reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) + if tx.meta.Freelist() != common.PgidNoFreelist { + for i := uint32(0); i <= tx.page(tx.meta.Freelist()).Overflow(); i++ { + reachable[tx.meta.Freelist()+common.Pgid(i)] = tx.page(tx.meta.Freelist()) } } - // Recursively check buckets. - tx.checkBucket(&tx.root, reachable, freed, kvStringer, ch) + if cfg.pageId == 0 { + // Check the whole db file, starting from the root bucket and + // recursively check all child buckets. + tx.recursivelyCheckBucket(&tx.root, reachable, freed, cfg.kvStringer, ch) - // Ensure all pages below high water mark are either reachable or freed. - for i := pgid(0); i < tx.meta.pgid; i++ { - _, isReachable := reachable[i] - if !isReachable && !freed[i] { - ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) + // Ensure all pages below high water mark are either reachable or freed. + for i := common.Pgid(0); i < tx.meta.Pgid(); i++ { + _, isReachable := reachable[i] + if !isReachable && !freed[i] { + ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) + } } + } else { + // Check the db file starting from a specified pageId. + if cfg.pageId < 2 || cfg.pageId >= uint64(tx.meta.Pgid()) { + ch <- fmt.Errorf("page ID (%d) out of range [%d, %d)", cfg.pageId, 2, tx.meta.Pgid()) + return + } + + tx.recursivelyCheckPage(common.Pgid(cfg.pageId), reachable, freed, cfg.kvStringer, ch) } +} - // Close the channel to signal completion. 
- close(ch) +func (tx *Tx) recursivelyCheckPage(pageId common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, + kvStringer KVStringer, ch chan error) { + tx.checkInvariantProperties(pageId, reachable, freed, kvStringer, ch) + tx.recursivelyCheckBucketInPage(pageId, reachable, freed, kvStringer, ch) } -func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, +func (tx *Tx) recursivelyCheckBucketInPage(pageId common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, kvStringer KVStringer, ch chan error) { - // Ignore inline buckets. - if b.root == 0 { - return - } + p := tx.page(pageId) - // Check every page used by this bucket. - b.tx.forEachPage(b.root, func(p *page, _ int, stack []pgid) { - if p.id > tx.meta.pgid { - ch <- fmt.Errorf("page %d: out of bounds: %d (stack: %v)", int(p.id), int(b.tx.meta.pgid), stack) + switch { + case p.IsBranchPage(): + for i := range p.BranchPageElements() { + elem := p.BranchPageElement(uint16(i)) + tx.recursivelyCheckBucketInPage(elem.Pgid(), reachable, freed, kvStringer, ch) } - - // Ensure each page is only referenced once. - for i := pgid(0); i <= pgid(p.overflow); i++ { - var id = p.id + i - if _, ok := reachable[id]; ok { - ch <- fmt.Errorf("page %d: multiple references (stack: %v)", int(id), stack) + case p.IsLeafPage(): + for i := range p.LeafPageElements() { + elem := p.LeafPageElement(uint16(i)) + if elem.IsBucketEntry() { + inBkt := common.NewInBucket(pageId, 0) + tmpBucket := Bucket{ + InBucket: &inBkt, + rootNode: &node{isLeaf: p.IsLeafPage()}, + FillPercent: DefaultFillPercent, + tx: tx, + } + if child := tmpBucket.Bucket(elem.Key()); child != nil { + tx.recursivelyCheckBucket(child, reachable, freed, kvStringer, ch) + } } - reachable[id] = p } + default: + ch <- fmt.Errorf("unexpected page type (flags: %x) for pgId:%d", p.Flags(), pageId) + } +} - // We should only encounter un-freed leaf and branch pages. - if freed[p.id] { - ch <- fmt.Errorf("page %d: reachable freed", int(p.id)) - } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 { - ch <- fmt.Errorf("page %d: invalid type: %s (stack: %v)", int(p.id), p.typ(), stack) - } - }) +func (tx *Tx) recursivelyCheckBucket(b *Bucket, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, + kvStringer KVStringer, ch chan error) { + // Ignore inline buckets. + if b.RootPage() == 0 { + return + } - tx.recursivelyCheckPages(b.root, kvStringer.KeyToString, ch) + tx.checkInvariantProperties(b.RootPage(), reachable, freed, kvStringer, ch) // Check each bucket within this bucket. 
_ = b.ForEachBucket(func(k []byte) error { if child := b.Bucket(k); child != nil { - tx.checkBucket(child, reachable, freed, kvStringer, ch) + tx.recursivelyCheckBucket(child, reachable, freed, kvStringer, ch) } return nil }) } -// recursivelyCheckPages confirms database consistency with respect to b-tree +func (tx *Tx) checkInvariantProperties(pageId common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, + kvStringer KVStringer, ch chan error) { + tx.forEachPage(pageId, func(p *common.Page, _ int, stack []common.Pgid) { + verifyPageReachable(p, tx.meta.Pgid(), stack, reachable, freed, ch) + }) + + tx.recursivelyCheckPageKeyOrder(pageId, kvStringer.KeyToString, ch) +} + +func verifyPageReachable(p *common.Page, hwm common.Pgid, stack []common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, ch chan error) { + if p.Id() > hwm { + ch <- fmt.Errorf("page %d: out of bounds: %d (stack: %v)", int(p.Id()), int(hwm), stack) + } + + // Ensure each page is only referenced once. + for i := common.Pgid(0); i <= common.Pgid(p.Overflow()); i++ { + var id = p.Id() + i + if _, ok := reachable[id]; ok { + ch <- fmt.Errorf("page %d: multiple references (stack: %v)", int(id), stack) + } + reachable[id] = p + } + + // We should only encounter un-freed leaf and branch pages. + if freed[p.Id()] { + ch <- fmt.Errorf("page %d: reachable freed", int(p.Id())) + } else if !p.IsBranchPage() && !p.IsLeafPage() { + ch <- fmt.Errorf("page %d: invalid type: %s (stack: %v)", int(p.Id()), p.Typ(), stack) + } +} + +// recursivelyCheckPageKeyOrder verifies database consistency with respect to b-tree // key order constraints: // - keys on pages must be sorted // - keys on children pages are between 2 consecutive keys on the parent's branch page). -func (tx *Tx) recursivelyCheckPages(pgId pgid, keyToString func([]byte) string, ch chan error) { - tx.recursivelyCheckPagesInternal(pgId, nil, nil, nil, keyToString, ch) +func (tx *Tx) recursivelyCheckPageKeyOrder(pgId common.Pgid, keyToString func([]byte) string, ch chan error) { + tx.recursivelyCheckPageKeyOrderInternal(pgId, nil, nil, nil, keyToString, ch) } -// recursivelyCheckPagesInternal verifies that all keys in the subtree rooted at `pgid` are: +// recursivelyCheckPageKeyOrderInternal verifies that all keys in the subtree rooted at `pgid` are: // - >=`minKeyClosed` (can be nil) // - <`maxKeyOpen` (can be nil) // - Are in right ordering relationship to their parents. // `pagesStack` is expected to contain IDs of pages from the tree root to `pgid` for the clean debugging message. -func (tx *Tx) recursivelyCheckPagesInternal( - pgId pgid, minKeyClosed, maxKeyOpen []byte, pagesStack []pgid, +func (tx *Tx) recursivelyCheckPageKeyOrderInternal( + pgId common.Pgid, minKeyClosed, maxKeyOpen []byte, pagesStack []common.Pgid, keyToString func([]byte) string, ch chan error) (maxKeyInSubtree []byte) { p := tx.page(pgId) pagesStack = append(pagesStack, pgId) switch { - case p.flags&branchPageFlag != 0: + case p.IsBranchPage(): // For branch page we navigate ranges of all subpages. 
runningMin := minKeyClosed - for i := range p.branchPageElements() { - elem := p.branchPageElement(uint16(i)) - verifyKeyOrder(elem.pgid, "branch", i, elem.key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack) + for i := range p.BranchPageElements() { + elem := p.BranchPageElement(uint16(i)) + verifyKeyOrder(elem.Pgid(), "branch", i, elem.Key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack) maxKey := maxKeyOpen - if i < len(p.branchPageElements())-1 { - maxKey = p.branchPageElement(uint16(i + 1)).key() + if i < len(p.BranchPageElements())-1 { + maxKey = p.BranchPageElement(uint16(i + 1)).Key() } - maxKeyInSubtree = tx.recursivelyCheckPagesInternal(elem.pgid, elem.key(), maxKey, pagesStack, keyToString, ch) + maxKeyInSubtree = tx.recursivelyCheckPageKeyOrderInternal(elem.Pgid(), elem.Key(), maxKey, pagesStack, keyToString, ch) runningMin = maxKeyInSubtree } return maxKeyInSubtree - case p.flags&leafPageFlag != 0: + case p.IsLeafPage(): runningMin := minKeyClosed - for i := range p.leafPageElements() { - elem := p.leafPageElement(uint16(i)) - verifyKeyOrder(pgId, "leaf", i, elem.key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack) - runningMin = elem.key() + for i := range p.LeafPageElements() { + elem := p.LeafPageElement(uint16(i)) + verifyKeyOrder(pgId, "leaf", i, elem.Key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack) + runningMin = elem.Key() } - if p.count > 0 { - return p.leafPageElement(p.count - 1).key() + if p.Count() > 0 { + return p.LeafPageElement(p.Count() - 1).Key() } default: - ch <- fmt.Errorf("unexpected page type for pgId:%d", pgId) + ch <- fmt.Errorf("unexpected page type (flags: %x) for pgId:%d", p.Flags(), pgId) } return maxKeyInSubtree } @@ -168,7 +224,7 @@ func (tx *Tx) recursivelyCheckPagesInternal( * verifyKeyOrder checks whether an entry with given #index on pgId (pageType: "branch|leaf") that has given "key", * is within range determined by (previousKey..maxKeyOpen) and reports found violations to the channel (ch). */ -func verifyKeyOrder(pgId pgid, pageType string, index int, key []byte, previousKey []byte, maxKeyOpen []byte, ch chan error, keyToString func([]byte) string, pagesStack []pgid) { +func verifyKeyOrder(pgId common.Pgid, pageType string, index int, key []byte, previousKey []byte, maxKeyOpen []byte, ch chan error, keyToString func([]byte) string, pagesStack []common.Pgid) { if index == 0 && previousKey != nil && compareKeys(previousKey, key) > 0 { ch <- fmt.Errorf("the first key[%d]=(hex)%s on %s page(%d) needs to be >= the key in the ancestor (%s). Stack: %v", index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack) @@ -194,6 +250,7 @@ func verifyKeyOrder(pgId pgid, pageType string, index int, key []byte, previousK type checkConfig struct { kvStringer KVStringer + pageId uint64 } type CheckOption func(options *checkConfig) @@ -204,6 +261,13 @@ func WithKVStringer(kvStringer KVStringer) CheckOption { } } +// WithPageId sets a page ID from which the check command starts to check +func WithPageId(pageId uint64) CheckOption { + return func(c *checkConfig) { + c.pageId = pageId + } +} + // KVStringer allows to prepare human-readable diagnostic messages. 
type KVStringer interface { KeyToString([]byte) string diff --git a/vendor/modules.txt b/vendor/modules.txt index 8c63de7f6e657..5b3f0055c8498 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1620,7 +1620,7 @@ github.com/spf13/afero/mem # github.com/spf13/cast v1.7.0 ## explicit; go 1.19 github.com/spf13/cast -# github.com/spf13/pflag v1.0.5 +# github.com/spf13/pflag v1.0.6 ## explicit; go 1.12 github.com/spf13/pflag # github.com/stretchr/objx v0.5.2 @@ -1734,9 +1734,12 @@ github.com/yuin/gopher-lua/pm # github.com/yusufpapurcu/wmi v1.2.4 ## explicit; go 1.16 github.com/yusufpapurcu/wmi -# go.etcd.io/bbolt v1.3.11 -## explicit; go 1.22 +# go.etcd.io/bbolt v1.4.0 +## explicit; go 1.23 go.etcd.io/bbolt +go.etcd.io/bbolt/errors +go.etcd.io/bbolt/internal/common +go.etcd.io/bbolt/internal/freelist # go.etcd.io/etcd/api/v3 v3.5.4 ## explicit; go 1.16 go.etcd.io/etcd/api/v3/authpb
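
NOTE (editorial, not part of the patch): the vendored bbolt v1.4.0 change above folds CheckWithOptions into Check, which now takes functional options such as WithKVStringer and the new WithPageId. The following is a minimal sketch of how a caller might exercise that consolidated API; the database path, the read-only open options, and the commented-out page ID are illustrative assumptions and do not come from Loki's code.

package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// Open the file read-only; per the doc comment above, Check avoids the
	// caching overhead on a read-only transaction. Path is a placeholder.
	db, err := bolt.Open("index.db", 0600, &bolt.Options{ReadOnly: true})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.View(func(tx *bolt.Tx) error {
		// Check now accepts options directly; the former CheckWithOptions
		// entry point no longer exists in v1.4.0.
		errCh := tx.Check(
			bolt.WithKVStringer(bolt.HexKVStringer()), // hex-encode keys in diagnostics
			// bolt.WithPageId(42), // optionally start the check from a specific page ID
		)
		// The channel is closed by Check's goroutine when the scan finishes,
		// so ranging over it drains every reported consistency error.
		for cErr := range errCh {
			fmt.Println("consistency error:", cErr)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}

Passing WithPageId restricts the check to the subtree rooted at that page (valid IDs are 2 up to, but excluding, the high-water mark), whereas omitting it checks the whole file from the root bucket, as the diff above shows.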