Merge branch '8.17' into mergify/bp/8.17/pr-42514
strawgate authored Jan 31, 2025
2 parents 45b8c26 + 6a83a52 commit 4c75eea
Showing 5 changed files with 42 additions and 19 deletions.
14 changes: 14 additions & 0 deletions CHANGELOG.next.asciidoc
@@ -78,6 +78,20 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff]
- Support Elastic Agent control protocol chunking support {pull}37343[37343]
- Lower logging level to debug when attempting to configure beats with unknown fields from autodiscovered events/environments {pull}37816[37816]
- Set timeout of 1 minute for FQDN requests {pull}37756[37756]
- Fix issue where old data could be saved in the memory queue after acknowledgment, increasing memory use {pull}41356[41356]
- Ensure Elasticsearch output can always recover from network errors {pull}40794[40794]
- Add `translate_ldap_attribute` processor. {pull}41472[41472]
- Remove unnecessary debug logs during idle connection teardown {issue}40824[40824]
- Remove unnecessary reload for Elastic Agent managed beats when apm tracing config changes from nil to nil {pull}41794[41794]
- Fix incorrect cloud provider identification in add_cloud_metadata processor using provider priority mechanism {pull}41636[41636]
- Prevent panic if libbeat processors are loaded more than once. {issue}41475[41475] {pull}41857[41857]
- Allow network condition to handle field values that are arrays of IP addresses. {pull}41918[41918]
- Fix a bug where log files are rotated on startup when interval is configured and rotateonstartup is disabled {issue}41894[41894] {pull}41895[41895]
- Fix setting unique registry for non beat receivers {issue}42288[42288] {pull}42292[42292]
- The Kafka output now drops events when there is an authorisation error {issue}42343[42343] {pull}42401[42401]
- Fix autodiscovery memory leak related to metadata of start events {pull}41748[41748]
- All standard queue metrics are now included in metrics monitoring, including: `added.{events, bytes}`, `consumed.{events, bytes}`, `removed.{events, bytes}`, and `filled.{events, bytes, pct}`. {pull}42439[42439]
- The following output latency metrics are now included in metrics monitoring: `output.latency.{count, max, median, p99}`. {pull}42439[42439]

*Auditbeat*

3 changes: 3 additions & 0 deletions filebeat/docs/inputs/input-common-options.asciidoc
@@ -84,6 +84,9 @@ this option usually results in simpler configuration files. If the pipeline is
configured both in the input and output, the option from the
input is used.

IMPORTANT: The `pipeline` value is always lowercased. If you set `pipeline: Foo-Bar`,
the pipeline name in {es} must be defined as `foo-bar`.
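
For example, a minimal sketch of an input configuration (the input type,
paths, and pipeline name here are only illustrative):

[source,yaml]
------------------------------------------------------------------------------
filebeat.inputs:
  - type: filestream
    id: my-filestream-id
    paths:
      - /var/log/*.log
    # The value is lowercased before events are sent, so the ingest
    # pipeline must exist in Elasticsearch as "foo-bar".
    pipeline: Foo-Bar
------------------------------------------------------------------------------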

[float]
===== `keep_null`

3 changes: 3 additions & 0 deletions libbeat/outputs/elasticsearch/docs/elasticsearch.asciidoc
@@ -467,6 +467,9 @@ output.elasticsearch:
pipeline: my_pipeline_id
------------------------------------------------------------------------------

IMPORTANT: The `pipeline` value is always lowercased. If you set `pipeline: Foo-Bar`,
the pipeline name in {es} must be defined as `foo-bar`.
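
For example, if the output sets `pipeline: Foo-Bar`, the corresponding ingest
pipeline has to be created in {es} under the lowercased name. A minimal sketch
(the pipeline body is only a placeholder):

[source,console]
------------------------------------------------------------------------------
PUT _ingest/pipeline/foo-bar
{
  "description": "Matches the lowercased value of `pipeline: Foo-Bar`",
  "processors": []
}
------------------------------------------------------------------------------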

For more information, see <<configuring-ingest-node>>.

ifndef::apm-server[]
18 changes: 0 additions & 18 deletions libbeat/publisher/pipeline/monitoring.go
@@ -74,11 +74,6 @@ type metricsObserverVars struct {
eventsTotal, eventsFiltered, eventsPublished, eventsFailed *monitoring.Uint
eventsDropped, eventsRetry *monitoring.Uint // (retryer) drop/retry counters
activeEvents *monitoring.Uint

// queue metrics
queueACKed *monitoring.Uint
queueMaxEvents *monitoring.Uint
percentQueueFull *monitoring.Float
}

func newMetricsObserver(metrics *monitoring.Registry) *metricsObserver {
@@ -118,19 +113,6 @@ func newMetricsObserver(metrics *monitoring.Registry) *metricsObserver {
// events.dropped counts events that were dropped because errors from
// the output workers exceeded the configured maximum retry count.
eventsDropped: monitoring.NewUint(reg, "events.dropped"),

// (Gauge) queue.max_events measures the maximum number of events the
// queue will accept, or 0 if there is none.
queueMaxEvents: monitoring.NewUint(reg, "queue.max_events"),

// queue.acked counts events that have been acknowledged by the output
// workers. This includes events that were dropped for fatal errors,
// which are also reported in events.dropped.
queueACKed: monitoring.NewUint(reg, "queue.acked"),

// (Gauge) queue.filled.pct.events measures the fraction (from 0 to 1)
// of the queue's event capacity that is currently filled.
percentQueueFull: monitoring.NewFloat(reg, "queue.filled.pct.events"),
},
}
}
23 changes: 22 additions & 1 deletion metricbeat/module/beat/stats/data.go
@@ -85,8 +85,29 @@ var (
"pipeline": c.Dict("pipeline", s.Schema{
"clients": c.Int("clients"),
"queue": c.Dict("queue", s.Schema{
"acked": c.Int("acked"),
"max_events": c.Int("max_events"),

"added": c.Dict("added", s.Schema{
"events": c.Int("events"),
"bytes": c.Int("bytes"),
}),
"consumed": c.Dict("consumed", s.Schema{
"events": c.Int("events"),
"bytes": c.Int("bytes"),
}),
"removed": c.Dict("removed", s.Schema{
"events": c.Int("events"),
"bytes": c.Int("bytes"),
}),
"filled": c.Dict("filled", s.Schema{
"events": c.Int("events"),
"bytes": c.Int("bytes"),
"pct": c.Float("pct"),
}),

// Backwards compatibility: "acked" is the old name for
// "removed.events" and should not be used by new code/dashboards.
"acked": c.Int("acked"),
}),
"events": c.Dict("events", s.Schema{
"active": c.Int("active"),
