From 894a09c16e98e2a81ae26f6ad9f091e2c5230d65 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Fri, 31 Jan 2025 11:33:39 -0500 Subject: [PATCH 1/2] Document that `pipeline` needs to be lowercase (#42438) (#42460) Document that the ingest pipeline ID from Elasticsearch defined in the input or output configuration is always lowercased, thus the ingest pipeline in Elasticsearch can only use lowercase names. (cherry picked from commit 8406c86e9a36a50533eea6313acda3c87101f92c) Co-authored-by: Tiago Queiroz --- filebeat/docs/inputs/input-common-options.asciidoc | 3 +++ libbeat/outputs/elasticsearch/docs/elasticsearch.asciidoc | 3 +++ 2 files changed, 6 insertions(+) diff --git a/filebeat/docs/inputs/input-common-options.asciidoc b/filebeat/docs/inputs/input-common-options.asciidoc index eaa1ffe6ac8..cbd7a06f9cd 100644 --- a/filebeat/docs/inputs/input-common-options.asciidoc +++ b/filebeat/docs/inputs/input-common-options.asciidoc @@ -84,6 +84,9 @@ this option usually results in simpler configuration files. If the pipeline is configured both in the input and output, the option from the input is used. +IMPORTANT: The `pipeline` is always lowercased. If `pipeline: Foo-Bar`, then +the pipeline name in {es} needs to be defined as `foo-bar`. + [float] ===== `keep_null` diff --git a/libbeat/outputs/elasticsearch/docs/elasticsearch.asciidoc b/libbeat/outputs/elasticsearch/docs/elasticsearch.asciidoc index 3bfc0ad9bd5..dd175fa4531 100644 --- a/libbeat/outputs/elasticsearch/docs/elasticsearch.asciidoc +++ b/libbeat/outputs/elasticsearch/docs/elasticsearch.asciidoc @@ -467,6 +467,9 @@ output.elasticsearch: pipeline: my_pipeline_id ------------------------------------------------------------------------------ +IMPORTANT: The `pipeline` is always lowercased. If `pipeline: Foo-Bar`, then +the pipeline name in {es} needs to be defined as `foo-bar`. + For more information, see <>. 
ifndef::apm-server[] From 6a83a522a94f400ffe0db80ac79e36736dac6afc Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Fri, 31 Jan 2025 17:42:06 +0100 Subject: [PATCH 2/2] Include full queue metrics in the monitoring index (#42439) (#42519) Add queue metrics to the Metricbeat monitoring schema so they can be included in standard Agent dashboards. This is the Beats-side half of https://github.com/elastic/beats/issues/42093. Affected metrics are: `added.{events, bytes}`, `consumed.{events, bytes}`, `removed.{events, bytes}`, and `filled.{events, bytes, pct}`, all within `monitoring.metrics.libbeat.pipeline.queue`. (cherry picked from commit 7eb2bdd28c04f0f66638e2e4feea8fe32159c795) Co-authored-by: Fae Charlton --- CHANGELOG.next.asciidoc | 14 ++++++++++++ libbeat/publisher/pipeline/monitoring.go | 18 --------------- metricbeat/module/beat/stats/data.go | 29 +++++++++++++++++++++++- 3 files changed, 42 insertions(+), 19 deletions(-) diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index e347f68d5e3..f97950151fe 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -78,6 +78,20 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Support Elastic Agent control protocol chunking support {pull}37343[37343] - Lower logging level to debug when attempting to configure beats with unknown fields from autodiscovered events/environments {pull}[37816][37816] - Set timeout of 1 minute for FQDN requests {pull}37756[37756] +- Fix issue where old data could be saved in the memory queue after acknowledgment, increasing memory use {pull}41356[41356] +- Ensure Elasticsearch output can always recover from network errors {pull}40794[40794] +- Add `translate_ldap_attribute` processor. 
{pull}41472[41472] +- Remove unnecessary debug logs during idle connection teardown {issue}40824[40824] +- Remove unnecessary reload for Elastic Agent managed beats when apm tracing config changes from nil to nil {pull}41794[41794] +- Fix incorrect cloud provider identification in add_cloud_metadata processor using provider priority mechanism {pull}41636[41636] +- Prevent panic if libbeat processors are loaded more than once. {issue}41475[41475] {pull}41857[41857] +- Allow network condition to handle field values that are arrays of IP addresses. {pull}41918[41918] +- Fix a bug where log files are rotated on startup when interval is configured and rotateonstartup is disabled {issue}41894[41894] {pull}41895[41895] +- Fix setting unique registry for non beat receivers {issue}42288[42288] {pull}42292[42292] +- The Kafka output now drops events when there is an authorisation error {issue}42343[42343] {pull}42401[42401] +- Fix autodiscovery memory leak related to metadata of start events {pull}41748[41748] +- All standard queue metrics are now included in metrics monitoring, including: `added.{events, bytes}`, `consumed.{events, bytes}`, `removed.{events, bytes}`, and `filled.{events, bytes, pct}`. {pull}42439[42439] +- The following output latency metrics are now included in metrics monitoring: `output.latency.{count, max, median, p99}`. 
{pull}42439[42439] *Auditbeat* diff --git a/libbeat/publisher/pipeline/monitoring.go b/libbeat/publisher/pipeline/monitoring.go index 4a1e5ad76a1..50a32ad13fb 100644 --- a/libbeat/publisher/pipeline/monitoring.go +++ b/libbeat/publisher/pipeline/monitoring.go @@ -74,11 +74,6 @@ type metricsObserverVars struct { eventsTotal, eventsFiltered, eventsPublished, eventsFailed *monitoring.Uint eventsDropped, eventsRetry *monitoring.Uint // (retryer) drop/retry counters activeEvents *monitoring.Uint - - // queue metrics - queueACKed *monitoring.Uint - queueMaxEvents *monitoring.Uint - percentQueueFull *monitoring.Float } func newMetricsObserver(metrics *monitoring.Registry) *metricsObserver { @@ -118,19 +113,6 @@ func newMetricsObserver(metrics *monitoring.Registry) *metricsObserver { // events.dropped counts events that were dropped because errors from // the output workers exceeded the configured maximum retry count. eventsDropped: monitoring.NewUint(reg, "events.dropped"), - - // (Gauge) queue.max_events measures the maximum number of events the - // queue will accept, or 0 if there is none. - queueMaxEvents: monitoring.NewUint(reg, "queue.max_events"), - - // queue.acked counts events that have been acknowledged by the output - // workers. This includes events that were dropped for fatal errors, - // which are also reported in events.dropped. - queueACKed: monitoring.NewUint(reg, "queue.acked"), - - // (Gauge) queue.filled.pct.events measures the fraction (from 0 to 1) - // of the queue's event capacity that is currently filled. 
- percentQueueFull: monitoring.NewFloat(reg, "queue.filled.pct.events"), }, } } diff --git a/metricbeat/module/beat/stats/data.go b/metricbeat/module/beat/stats/data.go index 3df496f0a95..8cb11866969 100644 --- a/metricbeat/module/beat/stats/data.go +++ b/metricbeat/module/beat/stats/data.go @@ -71,13 +71,40 @@ var ( "write": c.Dict("write", s.Schema{ "bytes": c.Int("bytes"), "errors": c.Int("errors"), + "latency": c.Dict("latency", s.Schema{ + "count": c.Int("count"), + "max": c.Int("max"), + "median": c.Float("median"), + "p99": c.Float("p99"), + }), }), }), "pipeline": c.Dict("pipeline", s.Schema{ "clients": c.Int("clients"), "queue": c.Dict("queue", s.Schema{ - "acked": c.Int("acked"), "max_events": c.Int("max_events"), + + "added": c.Dict("added", s.Schema{ + "events": c.Int("events"), + "bytes": c.Int("bytes"), + }), + "consumed": c.Dict("consumed", s.Schema{ + "events": c.Int("events"), + "bytes": c.Int("bytes"), + }), + "removed": c.Dict("removed", s.Schema{ + "events": c.Int("events"), + "bytes": c.Int("bytes"), + }), + "filled": c.Dict("filled", s.Schema{ + "events": c.Int("events"), + "bytes": c.Int("bytes"), + "pct": c.Float("pct"), + }), + + // Backwards compatibility: "acked" is the old name for + // "removed.events" and should not be used by new code/dashboards. + "acked": c.Int("acked"), }), "events": c.Dict("events", s.Schema{ "active": c.Int("active"),