diff --git a/.github/workflows/xk6-tests/xk6-test.js b/.github/workflows/xk6-tests/xk6-test.js index 093bad76cc6..a20de89a007 100644 --- a/.github/workflows/xk6-tests/xk6-test.js +++ b/.github/workflows/xk6-tests/xk6-test.js @@ -11,7 +11,7 @@ export let options = { export function handleSummary(data) { return { - 'summary-results.txt': data.metrics.foos.values.count.toString(), + 'summary-results.txt': data.metrics.custom.foos.values.count.toString(), }; } diff --git a/internal/cmd/builtin_output_gen.go b/internal/cmd/builtin_output_gen.go index 35fe34505b8..8983063e8f5 100644 --- a/internal/cmd/builtin_output_gen.go +++ b/internal/cmd/builtin_output_gen.go @@ -7,11 +7,11 @@ import ( "strings" ) -const _builtinOutputName = "cloudcsvdatadogexperimental-prometheus-rwinfluxdbjsonkafkastatsdexperimental-opentelemetry" +const _builtinOutputName = "cloudcsvdatadogexperimental-prometheus-rwinfluxdbjsonkafkastatsdexperimental-opentelemetrysummary" -var _builtinOutputIndex = [...]uint8{0, 5, 8, 15, 41, 49, 53, 58, 64, 90} +var _builtinOutputIndex = [...]uint8{0, 5, 8, 15, 41, 49, 53, 58, 64, 90, 97} -const _builtinOutputLowerName = "cloudcsvdatadogexperimental-prometheus-rwinfluxdbjsonkafkastatsdexperimental-opentelemetry" +const _builtinOutputLowerName = "cloudcsvdatadogexperimental-prometheus-rwinfluxdbjsonkafkastatsdexperimental-opentelemetrysummary" func (i builtinOutput) String() string { if i >= builtinOutput(len(_builtinOutputIndex)-1) { @@ -33,9 +33,10 @@ func _builtinOutputNoOp() { _ = x[builtinOutputKafka-(6)] _ = x[builtinOutputStatsd-(7)] _ = x[builtinOutputExperimentalOpentelemetry-(8)] + _ = x[builtinOutputSummary-(9)] } -var _builtinOutputValues = []builtinOutput{builtinOutputCloud, builtinOutputCSV, builtinOutputDatadog, builtinOutputExperimentalPrometheusRW, builtinOutputInfluxdb, builtinOutputJSON, builtinOutputKafka, builtinOutputStatsd, builtinOutputExperimentalOpentelemetry} +var _builtinOutputValues = []builtinOutput{builtinOutputCloud, 
builtinOutputCSV, builtinOutputDatadog, builtinOutputExperimentalPrometheusRW, builtinOutputInfluxdb, builtinOutputJSON, builtinOutputKafka, builtinOutputStatsd, builtinOutputExperimentalOpentelemetry, builtinOutputSummary} var _builtinOutputNameToValueMap = map[string]builtinOutput{ _builtinOutputName[0:5]: builtinOutputCloud, @@ -56,6 +57,8 @@ var _builtinOutputNameToValueMap = map[string]builtinOutput{ _builtinOutputLowerName[58:64]: builtinOutputStatsd, _builtinOutputName[64:90]: builtinOutputExperimentalOpentelemetry, _builtinOutputLowerName[64:90]: builtinOutputExperimentalOpentelemetry, + _builtinOutputName[90:97]: builtinOutputSummary, + _builtinOutputLowerName[90:97]: builtinOutputSummary, } var _builtinOutputNames = []string{ @@ -68,6 +71,7 @@ var _builtinOutputNames = []string{ _builtinOutputName[53:58], _builtinOutputName[58:64], _builtinOutputName[64:90], + _builtinOutputName[90:97], } // builtinOutputString retrieves an enum value from the enum constants string name. diff --git a/internal/cmd/outputs.go b/internal/cmd/outputs.go index aa3dd02ed0c..dc9fe8c7d59 100644 --- a/internal/cmd/outputs.go +++ b/internal/cmd/outputs.go @@ -37,6 +37,7 @@ const ( builtinOutputKafka builtinOutputStatsd builtinOutputExperimentalOpentelemetry + builtinOutputSummary ) // TODO: move this to an output sub-module after we get rid of the old collectors? 
diff --git a/internal/cmd/outputs_test.go b/internal/cmd/outputs_test.go index 0a7d916d308..02c38759327 100644 --- a/internal/cmd/outputs_test.go +++ b/internal/cmd/outputs_test.go @@ -11,6 +11,7 @@ func TestBuiltinOutputString(t *testing.T) { exp := []string{ "cloud", "csv", "datadog", "experimental-prometheus-rw", "influxdb", "json", "kafka", "statsd", "experimental-opentelemetry", + "summary", } assert.Equal(t, exp, builtinOutputStrings()) } diff --git a/internal/cmd/run.go b/internal/cmd/run.go index e8a66201b0c..05476c21443 100644 --- a/internal/cmd/run.go +++ b/internal/cmd/run.go @@ -31,6 +31,7 @@ import ( "go.k6.io/k6/lib/fsext" "go.k6.io/k6/metrics" "go.k6.io/k6/output" + "go.k6.io/k6/output/summary" ) // cmdRun handles the `k6 run` sub-command @@ -189,26 +190,76 @@ func (c *cmdRun) run(cmd *cobra.Command, args []string) (err error) { } executionState := execScheduler.GetState() - if !testRunState.RuntimeOptions.NoSummary.Bool { - defer func() { - logger.Debug("Generating the end-of-test summary...") - summaryResult, hsErr := test.initRunner.HandleSummary(globalCtx, &lib.Summary{ - Metrics: metricsEngine.ObservedMetrics, - RootGroup: testRunState.GroupSummary.Group(), - TestRunDuration: executionState.GetCurrentTestRunDuration(), - NoColor: c.gs.Flags.NoColor, - UIState: lib.UIState{ - IsStdOutTTY: c.gs.Stdout.IsTTY, - IsStdErrTTY: c.gs.Stderr.IsTTY, - }, + if !testRunState.RuntimeOptions.NoSummary.Bool { //nolint:nestif + sm, err := lib.ValidateSummaryMode(testRunState.RuntimeOptions.SummaryMode.String) + if err != nil { + logger.WithError(err).Error("invalid summary mode, falling back to \"compact\" (default)") + } + + switch sm { + // TODO: Remove this code block once we stop supporting the legacy summary, and just leave the default. 
+ case lib.SummaryModeLegacy: + // At the end of the test run + defer func() { + logger.Debug("Generating the end-of-test summary...") + + legacySummary := &lib.LegacySummary{ + Metrics: metricsEngine.ObservedMetrics, + RootGroup: testRunState.GroupSummary.Group(), + TestRunDuration: executionState.GetCurrentTestRunDuration(), + NoColor: c.gs.Flags.NoColor, + UIState: lib.UIState{ + IsStdOutTTY: c.gs.Stdout.IsTTY, + IsStdErrTTY: c.gs.Stderr.IsTTY, + }, + } + + summaryResult, hsErr := test.initRunner.HandleSummary(globalCtx, legacySummary, nil) + if hsErr == nil { + hsErr = handleSummaryResult(c.gs.FS, c.gs.Stdout, c.gs.Stderr, summaryResult) + } + if hsErr != nil { + logger.WithError(hsErr).Error("failed to handle the end-of-test summary") + } + }() + default: + // Instantiates the summary output + summaryOutput, err := summary.New(output.Params{ + RuntimeOptions: testRunState.RuntimeOptions, + Logger: c.gs.Logger, }) - if hsErr == nil { - hsErr = handleSummaryResult(c.gs.FS, c.gs.Stdout, c.gs.Stderr, summaryResult) - } - if hsErr != nil { - logger.WithError(hsErr).Error("failed to handle the end-of-test summary") + if err != nil { + logger.WithError(err).Error("failed to initialize the end-of-test summary output") } - }() + outputs = append(outputs, summaryOutput) + + // At the end of the test run + defer func() { + logger.Debug("Generating the end-of-test summary...") + + summary := summaryOutput.Summary( + executionState, + metricsEngine.ObservedMetrics, + test.initRunner.GetOptions(), + ) + + // TODO: We should probably try to move these out of the summary, + // likely as an additional argument like options. 
+ summary.NoColor = c.gs.Flags.NoColor + summary.UIState = lib.UIState{ + IsStdOutTTY: c.gs.Stdout.IsTTY, + IsStdErrTTY: c.gs.Stderr.IsTTY, + } + + summaryResult, hsErr := test.initRunner.HandleSummary(globalCtx, nil, summary) + if hsErr == nil { + hsErr = handleSummaryResult(c.gs.FS, c.gs.Stdout, c.gs.Stderr, summaryResult) + } + if hsErr != nil { + logger.WithError(hsErr).Error("failed to handle the end-of-test summary") + } + }() + } } waitInitDone := emitEvent(&event.Event{Type: event.Init}) diff --git a/internal/cmd/run_test.go b/internal/cmd/run_test.go index 93e7a902ed2..39ecd27127b 100644 --- a/internal/cmd/run_test.go +++ b/internal/cmd/run_test.go @@ -326,7 +326,7 @@ func TestThresholdsRuntimeBehavior(t *testing.T) { name: "#2518: submetrics without values should be rendered under their parent metric #2518", testFilename: "thresholds/thresholds_on_submetric_without_samples.js", expExitCode: 0, - expStdoutContains: " one..................: 0 0/s\n { tag:xyz }........: 0 0/s\n", + expStdoutContains: " one....................................: 0 0/s\n { tag:xyz }..........................: 0 0/s\n", }, { name: "#2512: parsing threshold names containing parsable tokens should be valid", @@ -337,7 +337,7 @@ func TestThresholdsRuntimeBehavior(t *testing.T) { name: "#2520: thresholds over metrics without values should avoid division by zero and displaying NaN values", testFilename: "thresholds/empty_sink_no_nan.js", expExitCode: 0, - expStdoutContains: "rate.................: 0.00%", + expStdoutContains: "rate...................................: 0.00%", expStdoutNotContains: "NaN", }, } diff --git a/internal/cmd/runtime_options.go b/internal/cmd/runtime_options.go index dd6cef91dc4..dceda9f955f 100644 --- a/internal/cmd/runtime_options.go +++ b/internal/cmd/runtime_options.go @@ -31,6 +31,8 @@ extended: base + sets "global" as alias for "globalThis" flags.StringArrayP("env", "e", nil, "add/override environment variable with `VAR=value`") 
flags.Bool("no-thresholds", false, "don't run thresholds") flags.Bool("no-summary", false, "don't show the summary at the end of the test") + flags.String("with-summary", lib.SummaryModeCompact.String(), "determine the summary mode,"+ + " \"compact\", \"full\" or \"legacy\"") flags.String( "summary-export", "", @@ -41,94 +43,119 @@ extended: base + sets "global" as alias for "globalThis" return flags } -func saveBoolFromEnv(env map[string]string, varName string, placeholder *null.Bool) error { - strValue, ok := env[varName] - if !ok { - return nil +func getRuntimeOptions( + flags *pflag.FlagSet, + environment map[string]string, +) (lib.RuntimeOptions, error) { + // TODO: refactor with composable helpers as a part of #883, to reduce copy-paste + // TODO: get these options out of the JSON config file as well? + opts, err := populateRuntimeOptionsFromEnv(runtimeOptionsFromFlags(flags), environment) + if err != nil { + return opts, err } - val, err := strconv.ParseBool(strValue) + + // Set/overwrite environment variables with custom user-supplied values + envVars, err := flags.GetStringArray("env") if err != nil { - return fmt.Errorf("env var '%s' is not a valid boolean value: %w", varName, err) + return opts, err } - // Only override if not explicitly set via the CLI flag - if !placeholder.Valid { - *placeholder = null.BoolFrom(val) + + for _, kv := range envVars { + k, v := state.ParseEnvKeyValue(kv) + // Allow only alphanumeric ASCII variable names for now + if !userEnvVarName.MatchString(k) { + return opts, fmt.Errorf("invalid environment variable name '%s'", k) + } + opts.Env[k] = v } - return nil + + return opts, nil } -func getRuntimeOptions(flags *pflag.FlagSet, environment map[string]string) (lib.RuntimeOptions, error) { - // TODO: refactor with composable helpers as a part of #883, to reduce copy-paste - // TODO: get these options out of the JSON config file as well? 
+func runtimeOptionsFromFlags(flags *pflag.FlagSet) lib.RuntimeOptions { opts := lib.RuntimeOptions{ TestType: getNullString(flags, "type"), IncludeSystemEnvVars: getNullBool(flags, "include-system-env-vars"), CompatibilityMode: getNullString(flags, "compatibility-mode"), NoThresholds: getNullBool(flags, "no-thresholds"), NoSummary: getNullBool(flags, "no-summary"), + SummaryMode: getNullString(flags, "with-summary"), SummaryExport: getNullString(flags, "summary-export"), TracesOutput: getNullString(flags, "traces-output"), Env: make(map[string]string), } + return opts +} + +func populateRuntimeOptionsFromEnv(opts lib.RuntimeOptions, environment map[string]string) (lib.RuntimeOptions, error) { + // Only override if not explicitly set via the CLI flag - if envVar, ok := environment["K6_TYPE"]; ok && !opts.TestType.Valid { - // Only override if not explicitly set via the CLI flag + if envVar, ok := environment["K6_TYPE"]; !opts.TestType.Valid && ok { opts.TestType = null.StringFrom(envVar) } - if envVar, ok := environment["K6_COMPATIBILITY_MODE"]; ok && !opts.CompatibilityMode.Valid { - // Only override if not explicitly set via the CLI flag + + if envVar, ok := environment["K6_COMPATIBILITY_MODE"]; !opts.CompatibilityMode.Valid && ok { opts.CompatibilityMode = null.StringFrom(envVar) } - if _, err := lib.ValidateCompatibilityMode(opts.CompatibilityMode.String); err != nil { - // some early validation - return opts, err + + if envVar, ok := environment["K6_WITH_SUMMARY"]; !opts.SummaryMode.Valid && ok { + opts.SummaryMode = null.StringFrom(envVar) } if err := saveBoolFromEnv(environment, "K6_INCLUDE_SYSTEM_ENV_VARS", &opts.IncludeSystemEnvVars); err != nil { return opts, err } + if err := saveBoolFromEnv(environment, "K6_NO_THRESHOLDS", &opts.NoThresholds); err != nil { return opts, err } + if err := saveBoolFromEnv(environment, "K6_NO_SUMMARY", &opts.NoSummary); err != nil { return opts, err } - if envVar, ok := environment["K6_SUMMARY_EXPORT"]; ok { - if 
!opts.SummaryExport.Valid { - opts.SummaryExport = null.StringFrom(envVar) - } + if _, err := lib.ValidateCompatibilityMode(opts.CompatibilityMode.String); err != nil { + // some early validation + return opts, err } - if envVar, ok := environment["SSLKEYLOGFILE"]; ok { - if !opts.KeyWriter.Valid { - opts.KeyWriter = null.StringFrom(envVar) - } + if _, err := lib.ValidateSummaryMode(opts.SummaryMode.String); err != nil { + // some early validation + return opts, err } - if envVar, ok := environment["K6_TRACES_OUTPUT"]; ok { - if !opts.TracesOutput.Valid { - opts.TracesOutput = null.StringFrom(envVar) - } + if envVar, ok := environment["K6_SUMMARY_EXPORT"]; !opts.SummaryExport.Valid && ok { + opts.SummaryExport = null.StringFrom(envVar) } - if opts.IncludeSystemEnvVars.Bool { // If enabled, gather the actual system environment variables - opts.Env = environment + if envVar, ok := environment["SSLKEYLOGFILE"]; !opts.KeyWriter.Valid && ok { + opts.KeyWriter = null.StringFrom(envVar) } - // Set/overwrite environment variables with custom user-supplied values - envVars, err := flags.GetStringArray("env") - if err != nil { - return opts, err + if envVar, ok := environment["K6_TRACES_OUTPUT"]; !opts.TracesOutput.Valid && ok { + opts.TracesOutput = null.StringFrom(envVar) } - for _, kv := range envVars { - k, v := state.ParseEnvKeyValue(kv) - // Allow only alphanumeric ASCII variable names for now - if !userEnvVarName.MatchString(k) { - return opts, fmt.Errorf("invalid environment variable name '%s'", k) - } - opts.Env[k] = v + + // If enabled, gather the actual system environment variables + if opts.IncludeSystemEnvVars.Bool { + opts.Env = environment } return opts, nil } + +func saveBoolFromEnv(env map[string]string, varName string, placeholder *null.Bool) error { + strValue, ok := env[varName] + if !ok { + return nil + } + val, err := strconv.ParseBool(strValue) + if err != nil { + return fmt.Errorf("env var '%s' is not a valid boolean value: %w", varName, err) + } + // 
Only override if not explicitly set via the CLI flag + if !placeholder.Valid { + *placeholder = null.BoolFrom(val) + } + return nil +} diff --git a/internal/cmd/runtime_options_test.go b/internal/cmd/runtime_options_test.go index fe9b21eb416..6f5c9dd97a2 100644 --- a/internal/cmd/runtime_options_test.go +++ b/internal/cmd/runtime_options_test.go @@ -114,6 +114,7 @@ func TestRuntimeOptions(t *testing.T) { extendedCompatMode = null.NewString("extended", true) enhancedCompatMode = null.NewString("experimental_enhanced", true) defaultTracesOutput = null.NewString("none", false) + defaultSummaryMode = null.NewString("compact", false) ) runtimeOptionsTestCases := map[string]runtimeOptionsTestCase{ @@ -125,6 +126,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: defaultCompatMode, Env: nil, TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "disabled sys env by default": { @@ -135,6 +137,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: defaultCompatMode, Env: map[string]string{}, TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "disabled sys env by default with ext compat mode": { @@ -145,6 +148,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: extendedCompatMode, Env: map[string]string{}, TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "disabled sys env by default with experimental_enhanced compat mode": { @@ -155,6 +159,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: enhancedCompatMode, Env: map[string]string{}, TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "disabled sys env by cli 1": { @@ -166,6 +171,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: baseCompatMode, Env: map[string]string{}, TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "disabled sys env by cli 2": { @@ -177,6 +183,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: baseCompatMode, 
Env: map[string]string{}, TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "disabled sys env by env": { @@ -187,6 +194,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: extendedCompatMode, Env: map[string]string{}, TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "enabled sys env by env": { @@ -197,6 +205,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: extendedCompatMode, Env: map[string]string{"K6_INCLUDE_SYSTEM_ENV_VARS": "true", "K6_COMPATIBILITY_MODE": "extended"}, TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "enabled sys env by default": { @@ -208,6 +217,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: defaultCompatMode, Env: map[string]string{"test1": "val1"}, TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "enabled sys env by cli 1": { @@ -219,6 +229,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: defaultCompatMode, Env: map[string]string{"test1": "val1"}, TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "enabled sys env by cli 2": { @@ -230,6 +241,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: defaultCompatMode, Env: map[string]string{"test1": "val1"}, TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "run only system env": { @@ -241,6 +253,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: defaultCompatMode, Env: map[string]string{"test1": "val1"}, TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "mixed system and cli env": { @@ -252,6 +265,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: defaultCompatMode, Env: map[string]string{"test1": "val1", "test2": "", "test3": "val3", "test4": "", "test5": ""}, TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "mixed system and cli env 2": { @@ -263,6 +277,7 @@ func TestRuntimeOptions(t 
*testing.T) { CompatibilityMode: defaultCompatMode, Env: map[string]string{"test1": "val1", "test2": "", "test3": "val3", "test4": "", "test5": ""}, TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "disabled system env with cli params": { @@ -274,6 +289,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: defaultCompatMode, Env: map[string]string{"test2": "val2"}, TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "overwriting system env with cli param": { @@ -285,6 +301,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: defaultCompatMode, Env: map[string]string{"test1": "val1cli"}, TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "error wrong compat mode env var value": { @@ -327,6 +344,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: defaultCompatMode, Env: map[string]string{"test1": "value 1", "test2": "value 2"}, TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "valid env vars with special chars": { @@ -338,6 +356,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: defaultCompatMode, Env: map[string]string{"test1": "value 1", "test2": "value,2", "test3": ` , ,,, value, ,, 2!'@#,"`}, TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "summary and thresholds from env": { @@ -351,6 +370,7 @@ func TestRuntimeOptions(t *testing.T) { NoSummary: null.NewBool(false, true), SummaryExport: null.NewString("foo", true), TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "summary and thresholds from env overwritten by CLI": { @@ -365,6 +385,7 @@ func TestRuntimeOptions(t *testing.T) { NoSummary: null.NewBool(true, true), SummaryExport: null.NewString("bar", true), TracesOutput: defaultTracesOutput, + SummaryMode: defaultSummaryMode, }, }, "env var error detected even when CLI flags overwrite 1": { @@ -386,6 +407,7 @@ func TestRuntimeOptions(t *testing.T) { 
CompatibilityMode: defaultCompatMode, Env: map[string]string{}, TracesOutput: null.NewString("none", false), + SummaryMode: defaultSummaryMode, }, }, "traces output from env": { @@ -396,6 +418,7 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: defaultCompatMode, Env: map[string]string{}, TracesOutput: null.NewString("foo", true), + SummaryMode: defaultSummaryMode, }, }, "traces output from env overwritten by CLI": { @@ -407,6 +430,30 @@ func TestRuntimeOptions(t *testing.T) { CompatibilityMode: defaultCompatMode, Env: map[string]string{}, TracesOutput: null.NewString("bar", true), + SummaryMode: defaultSummaryMode, + }, + }, + "summary mode from env": { + useSysEnv: false, + systemEnv: map[string]string{"K6_WITH_SUMMARY": "full"}, + expRTOpts: lib.RuntimeOptions{ + IncludeSystemEnvVars: null.NewBool(false, false), + CompatibilityMode: defaultCompatMode, + Env: map[string]string{}, + TracesOutput: defaultTracesOutput, + SummaryMode: null.NewString("full", true), + }, + }, + "summary mode from env overwritten by CLI": { + useSysEnv: false, + systemEnv: map[string]string{"K6_WITH_SUMMARY": "full"}, + cliFlags: []string{"--with-summary", "legacy"}, + expRTOpts: lib.RuntimeOptions{ + IncludeSystemEnvVars: null.NewBool(false, false), + CompatibilityMode: defaultCompatMode, + Env: map[string]string{}, + TracesOutput: defaultTracesOutput, + SummaryMode: null.NewString("legacy", true), }, }, } diff --git a/internal/cmd/testdata/summary/api.js b/internal/cmd/testdata/summary/api.js new file mode 100644 index 00000000000..9b7ee0bd037 --- /dev/null +++ b/internal/cmd/testdata/summary/api.js @@ -0,0 +1,47 @@ +import http from 'k6/http' +import {check, group} from 'k6' +import {Trend} from 'k6/metrics'; + +const myTrend = new Trend('waiting_time'); + +export function apiTest() { + const res = http.get('https://httpbin.org/get') + myTrend.add(res.timings.waiting); + check(res, { + 'httpbin.org is up': (r) => r.status === 200, + 'httpbin.org is down': (r) => r.status 
=== 500, + }) + + group('auth', () => { + const res = http.post( + 'https://httpbin.org/auth', + JSON.stringify({ + username: 'sakai', + first_name: 'jin', + last_name: 'sakai', + email: 'jin.sakai@suckerpunch.com', + password: 'onegaishimasu', + }) + ) + myTrend.add(res.timings.waiting); + check(res, { + 'status is 201 CREATED': (r) => r.status === 201, + }) + + group('authorized crocodiles', () => { + const res = http.get('https://httpbin.org/get') + myTrend.add(res.timings.waiting); + check(res, { + 'authorized crocodiles are 200 OK': (r) => r.status === 200, + }) + }) + }) + + group('my crocodiles', () => { + const res = http.get('https://httpbin.org/get') + myTrend.add(res.timings.waiting); + check(res, { + 'my crocodiles are 200 OK': (r) => r.status === 200, + }) + }) +} \ No newline at end of file diff --git a/internal/cmd/testdata/summary/browser.js b/internal/cmd/testdata/summary/browser.js new file mode 100644 index 00000000000..6a04b04724f --- /dev/null +++ b/internal/cmd/testdata/summary/browser.js @@ -0,0 +1,12 @@ +import {browser} from 'k6/browser' + +export async function browserTest() { + const page = await browser.newPage() + + try { + await page.goto('https://test.k6.io/') + await page.screenshot({path: 'screenshots/screenshot.png'}) + } finally { + await page.close() + } +} diff --git a/internal/cmd/testdata/summary/grpc.js b/internal/cmd/testdata/summary/grpc.js new file mode 100644 index 00000000000..725e948a87a --- /dev/null +++ b/internal/cmd/testdata/summary/grpc.js @@ -0,0 +1,23 @@ +import grpc from 'k6/net/grpc'; +import {check} from 'k6' + +const GRPC_ADDR = __ENV.GRPC_ADDR || '127.0.0.1:10000'; +const GRPC_PROTO_PATH = __ENV.GRPC_PROTO_PATH || '../../../lib/testutils/grpcservice/route_guide.proto'; + +let client = new grpc.Client(); + +client.load([], GRPC_PROTO_PATH); + +export function grpcTest() { + client.connect(GRPC_ADDR, {plaintext: true}); + + const response = client.invoke("main.FeatureExplorer/GetFeature", { + latitude: 
410248224, + longitude: -747127767 + }) + + check(response, {"gRPCC status is OK": (r) => r && r.status === grpc.StatusOK}); + console.log(JSON.stringify(response.message)) + + client.close() +} \ No newline at end of file diff --git a/internal/cmd/testdata/summary/main.js b/internal/cmd/testdata/summary/main.js new file mode 100644 index 00000000000..6429b1f0beb --- /dev/null +++ b/internal/cmd/testdata/summary/main.js @@ -0,0 +1,39 @@ +export {apiTest} from './api.js'; +export {browserTest} from './browser.js'; +export {grpcTest} from './grpc.js'; +export {wsTest} from './ws.js'; + +export const options = { + thresholds: { + 'http_reqs': ['count<10', 'rate>2'], + 'http_reqs{group: ::auth}': ['count>1'], + 'http_reqs{scenario: api}': ['count>1'], + 'http_reqs{scenario: api, group: ::auth}': ['count<5'], + 'http_req_duration{group: ::auth}': ['p(95)<200', 'avg<100'], + }, + scenarios: { + api: { + executor: 'per-vu-iterations', + vus: 1, + iterations: 1, + exec: 'apiTest', + }, + browser: { + executor: 'shared-iterations', + options: { + browser: { + type: 'chromium', + }, + }, + exec: 'browserTest', + }, + grpc: { + executor: 'shared-iterations', + exec: 'grpcTest', + }, + ws: { + executor: 'shared-iterations', + exec: 'wsTest', + }, + }, +} diff --git a/internal/cmd/testdata/summary/ws.js b/internal/cmd/testdata/summary/ws.js new file mode 100644 index 00000000000..7b021405860 --- /dev/null +++ b/internal/cmd/testdata/summary/ws.js @@ -0,0 +1,79 @@ +import { + randomString, + randomIntBetween, +} from "https://jslib.k6.io/k6-utils/1.1.0/index.js"; +import {WebSocket} from "k6/experimental/websockets"; +import { + setTimeout, + clearTimeout, + setInterval, + clearInterval, +} from "k6/timers"; + +let chatRoomName = "publicRoom"; // choose your chat room name +let sessionDuration = randomIntBetween(1000, 5000); // user session between 5s and 1m + +export function wsTest() { + for (let i = 0; i < 4; i++) { + startWSWorker(i); + } +} + +function startWSWorker(id) { + 
let url = `wss://test-api.k6.io/ws/crocochat/${chatRoomName}/`; + let ws = new WebSocket(url); + ws.binaryType = "arraybuffer"; + ws.addEventListener("open", () => { + ws.send( + JSON.stringify({ + event: "SET_NAME", + new_name: `Croc ${__VU}:${id}`, + }) + ); + + ws.addEventListener("message", (e) => { + let msg = JSON.parse(e.data); + if (msg.event === "CHAT_MSG") { + console.log( + `VU ${__VU}:${id} received: ${msg.user} says: ${msg.message}` + ); + } else if (msg.event === "ERROR") { + console.error(`VU ${__VU}:${id} received:: ${msg.message}`); + } else { + console.log( + `VU ${__VU}:${id} received unhandled message: ${msg.message}` + ); + } + }); + + let intervalId = setInterval(() => { + ws.send( + JSON.stringify({ + event: "SAY", + message: `I'm saying ${randomString(5)}`, + }) + ); + }, randomIntBetween(2000, 8000)); // say something every 2-8seconds + + let timeout1id = setTimeout(function () { + clearInterval(intervalId); + console.log( + `VU ${__VU}:${id}: ${sessionDuration}ms passed, leaving the chat` + ); + ws.send(JSON.stringify({event: "LEAVE"})); + }, sessionDuration); + + let timeout2id = setTimeout(function () { + console.log( + `Closing the socket forcefully 3s after graceful LEAVE` + ); + ws.close(); + }, sessionDuration + 3000); + + ws.addEventListener("close", () => { + clearTimeout(timeout1id); + clearTimeout(timeout2id); + console.log(`VU ${__VU}:${id}: disconnected`); + }); + }); +} \ No newline at end of file diff --git a/internal/cmd/tests/cmd_run_grpc_test.go b/internal/cmd/tests/cmd_run_grpc_test.go index a7399db4428..81a594cdbc0 100644 --- a/internal/cmd/tests/cmd_run_grpc_test.go +++ b/internal/cmd/tests/cmd_run_grpc_test.go @@ -15,7 +15,7 @@ import ( const projectRootPath = "../../../" // TestGRPCInputOutput runs same k6's scripts that we have in example folder -// it check that output contains/not contains cetane things +// it checks that output contains/not contains cetane things func TestGRPCInputOutput(t *testing.T) { 
t.Parallel() diff --git a/internal/cmd/tests/cmd_run_test.go b/internal/cmd/tests/cmd_run_test.go index a6abfb8563c..a9dc41326ca 100644 --- a/internal/cmd/tests/cmd_run_test.go +++ b/internal/cmd/tests/cmd_run_test.go @@ -329,17 +329,17 @@ func TestMetricsAndThresholds(t *testing.T) { var summary map[string]interface{} require.NoError(t, json.Unmarshal(ts.Stdout.Bytes(), &summary)) - metrics, ok := summary["metrics"].(map[string]interface{}) + thresholds, ok := summary["thresholds"].(map[string]interface{}) require.True(t, ok) - teardownCounter, ok := metrics["teardown_counter"].(map[string]interface{}) + teardownCounter, ok := thresholds["teardown_counter"].(map[string]interface{}) require.True(t, ok) - teardownThresholds, ok := teardownCounter["thresholds"].(map[string]interface{}) + teardownCounterThresholds, ok := teardownCounter["thresholds"].([]interface{}) require.True(t, ok) - expected := map[string]interface{}{"count == 1": map[string]interface{}{"ok": true}} - require.Equal(t, expected, teardownThresholds) + expected := []interface{}{map[string]interface{}{"source": "count == 1", "ok": true}} + require.Equal(t, expected, teardownCounterThresholds) } func TestSSLKEYLOGFILEAbsolute(t *testing.T) { @@ -468,9 +468,9 @@ func TestSubMetricThresholdNoData(t *testing.T) { assert.Len(t, ts.LoggerHook.Drain(), 0) assert.Contains(t, ts.Stdout.String(), ` - one..................: 0 0/s - { tag:xyz }........: 0 0/s - two..................: 42`) + one....................................: 0 0/s + { tag:xyz }..........................: 0 0/s + two....................................: 42`) } func getTestServer(tb testing.TB, routes map[string]http.Handler) *httptest.Server { @@ -616,10 +616,10 @@ func TestSetupTeardownThresholds(t *testing.T) { stdOut := ts.Stdout.String() t.Log(stdOut) - assert.Contains(t, stdOut, `✓ checks.........................: 100.00% 8 out of 8`) - assert.Contains(t, stdOut, `✓ http_reqs......................: 8`) - assert.Contains(t, stdOut, `✓ 
iterations.....................: 5`) - assert.Contains(t, stdOut, `✓ setup_teardown.................: 3`) + assert.Contains(t, stdOut, "checks\n ✓ 'rate == 1' rate=100.00%") + assert.Contains(t, stdOut, "http_reqs\n ✓ 'count == 8' count=8") + assert.Contains(t, stdOut, "iterations\n ✓ 'count == 5' count=5") + assert.Contains(t, stdOut, "setup_teardown\n ✓ 'count == 3' count=3") logMsgs := ts.LoggerHook.Drain() for _, msg := range logMsgs { @@ -669,10 +669,10 @@ func TestThresholdsFailed(t *testing.T) { assert.True(t, testutils.LogContains(ts.LoggerHook.Drain(), logrus.ErrorLevel, expErr)) stdout := ts.Stdout.String() t.Log(stdout) - assert.Contains(t, stdout, ` ✓ iterations...........: 3`) - assert.Contains(t, stdout, ` ✗ { scenario:sc1 }...: 1`) - assert.Contains(t, stdout, ` ✗ { scenario:sc2 }...: 2`) - assert.Contains(t, stdout, ` ✓ { scenario:sc3 }...: 0 0/s`) + assert.Contains(t, stdout, " iterations\n ✓ 'count == 3' count=3") + assert.Contains(t, stdout, " {scenario:sc1}\n ✗ 'count == 2' count=1") + assert.Contains(t, stdout, " ✗ 'count == 2' count=1") + assert.Contains(t, stdout, " {scenario:sc2}\n ✗ 'count == 1' count=2") } func TestAbortedByThreshold(t *testing.T) { @@ -711,7 +711,7 @@ func TestAbortedByThreshold(t *testing.T) { assert.True(t, testutils.LogContains(ts.LoggerHook.Drain(), logrus.ErrorLevel, expErr)) stdOut := ts.Stdout.String() t.Log(stdOut) - assert.Contains(t, stdOut, `✗ iterations`) + assert.Contains(t, stdOut, "iterations\n ✗ 'count == 1'") assert.Contains(t, stdOut, `teardown() called`) assert.Contains(t, stdOut, `level=debug msg="Metrics emission of VUs and VUsMax metrics stopped"`) assert.Contains(t, stdOut, `level=debug msg="Metrics and traces processing finished!"`) @@ -762,9 +762,20 @@ func TestAbortedByUserWithGoodThresholds(t *testing.T) { assert.True(t, testutils.LogContains(logs, logrus.ErrorLevel, `test run was aborted because k6 received a 'interrupt' signal`)) stdout := ts.Stdout.String() t.Log(stdout) - assert.Contains(t, 
stdout, `✓ iterations`) - assert.Contains(t, stdout, `✓ tc`) - assert.Contains(t, stdout, `✓ { group:::teardown }`) + assert.Contains(t, stdout, ` + iterations + ✓ 'count >= 1' count=3 + + tc + ✓ 'count == 1' count=1 + + {group:::setup} + ✓ 'count == 0' count=0 + ✓ 'count == 0' count=0 + + {group:::teardown} + ✓ 'count == 1' count=1 + ✓ 'count == 1' count=1`) assert.Contains(t, stdout, `Stopping k6 in response to signal`) assert.Contains(t, stdout, `level=debug msg="Metrics emission of VUs and VUsMax metrics stopped"`) assert.Contains(t, stdout, `level=debug msg="Metrics and traces processing finished!"`) @@ -1373,7 +1384,7 @@ func TestMetricTagAndSetupDataIsolation(t *testing.T) { t.Log(stdout) assert.NotContains(t, stdout, "execution: local") // because of --quiet assert.NotContains(t, stdout, "output: cloud") // because of --quiet - assert.Equal(t, 12, strings.Count(stdout, "✓")) + assert.Equal(t, 25, strings.Count(stdout, "✓")) } func getSampleValues(t *testing.T, jsonOutput []byte, metric string, tags map[string]string) []float64 { @@ -1540,7 +1551,7 @@ func TestMinIterationDuration(t *testing.T) { stdout := ts.Stdout.String() t.Log(stdout) - assert.Contains(t, stdout, "✓ test_counter.........: 3") + assert.Contains(t, stdout, "test_counter\n ✓ 'count == 3") } func TestMetricNameError(t *testing.T) { @@ -1708,7 +1719,7 @@ func TestRunWithCloudOutputOverrides(t *testing.T) { t.Log(stdout) assert.Contains(t, stdout, "execution: local") assert.Contains(t, stdout, "output: cloud (https://bogus.url/runs/132), json (results.json)") - assert.Contains(t, stdout, "iterations...........: 1") + assert.Contains(t, stdout, "iterations.............................: 1") } func TestRunWithCloudOutputCustomConfigAndOverridesLegacyCloudOption(t *testing.T) { diff --git a/internal/cmd/ui.go b/internal/cmd/ui.go index 48723f3a574..df733a502d1 100644 --- a/internal/cmd/ui.go +++ b/internal/cmd/ui.go @@ -23,6 +23,7 @@ import ( "go.k6.io/k6/lib" "go.k6.io/k6/lib/consts" 
"go.k6.io/k6/output" + "go.k6.io/k6/output/summary" ) const ( @@ -116,7 +117,7 @@ func printExecutionDescription( for _, out := range outputs { desc := out.Description() switch desc { - case engine.IngesterDescription, lib.GroupSummaryDescription: + case engine.IngesterDescription, lib.GroupSummaryDescription, summary.OutputName: continue } if strings.HasPrefix(desc, dashboard.OutputName) { diff --git a/internal/js/runner.go b/internal/js/runner.go index c5b28ae9548..4db794e786b 100644 --- a/internal/js/runner.go +++ b/internal/js/runner.go @@ -349,9 +349,11 @@ func (r *Runner) IsExecutable(name string) bool { } // HandleSummary calls the specified summary callback, if supplied. -func (r *Runner) HandleSummary(ctx context.Context, summary *lib.Summary) (map[string]io.Reader, error) { - summaryDataForJS := summarizeMetricsToObject(summary, r.Bundle.Options, r.setupData) - +func (r *Runner) HandleSummary( + ctx context.Context, + legacy *lib.LegacySummary, + summary *lib.Summary, +) (map[string]io.Reader, error) { out := make(chan metrics.SampleContainer, 100) defer close(out) @@ -360,7 +362,7 @@ func (r *Runner) HandleSummary(ctx context.Context, summary *lib.Summary) (map[s } }() - summaryCtx, cancel := context.WithTimeout(ctx, r.getTimeoutFor(consts.HandleSummaryFn)) + summaryCtx, cancel := context.WithTimeout(ctx, 10*time.Minute) defer cancel() vu, err := r.newVU(summaryCtx, 0, 0, out) @@ -373,22 +375,15 @@ func (r *Runner) HandleSummary(ctx context.Context, summary *lib.Summary) (map[s }) vu.moduleVUImpl.ctx = summaryCtx - callbackResult := sobek.Undefined() - fn := vu.getExported(consts.HandleSummaryFn) - if fn != nil { - handleSummaryFn, ok := sobek.AssertFunction(fn) - if !ok { - return nil, fmt.Errorf("exported identifier %s must be a function", consts.HandleSummaryFn) - } + noColor, enableColors, summaryDataForJS, summaryCode := prepareHandleSummaryCall(r, legacy, summary) - callbackResult, _, _, err = vu.runFn(summaryCtx, false, handleSummaryFn, nil, 
vu.Runtime.ToValue(summaryDataForJS)) - if err != nil { - errText, fields := errext.Format(err) - r.preInitState.Logger.WithFields(fields).Error(errText) - } + handleSummaryDataAsValue := vu.Runtime.ToValue(summaryDataForJS) + callbackResult, err := runUserProvidedHandleSummaryCallback(summaryCtx, vu, handleSummaryDataAsValue) + if err != nil { + return nil, err } - wrapper := strings.Replace(summaryWrapperLambdaCode, "/*JSLIB_SUMMARY_CODE*/", jslibSummaryCode, 1) + wrapper := strings.Replace(summaryWrapperLambdaCode, "/*JSLIB_SUMMARY_CODE*/", summaryCode, 1) handleSummaryWrapperRaw, err := vu.Runtime.RunString(wrapper) if err != nil { return nil, fmt.Errorf("unexpected error while getting the summary wrapper: %w", err) @@ -398,11 +393,7 @@ func (r *Runner) HandleSummary(ctx context.Context, summary *lib.Summary) (map[s return nil, fmt.Errorf("unexpected error did not get a callable summary wrapper") } - wrapperArgs := []sobek.Value{ - callbackResult, - vu.Runtime.ToValue(r.Bundle.preInitState.RuntimeOptions.SummaryExport.String), - vu.Runtime.ToValue(summaryDataForJS), - } + wrapperArgs := prepareHandleWrapperArgs(vu, noColor, enableColors, callbackResult, handleSummaryDataAsValue) rawResult, _, _, err := vu.runFn(summaryCtx, false, handleSummaryWrapper, nil, wrapperArgs...) if deadlineError := r.checkDeadline(summaryCtx, consts.HandleSummaryFn, rawResult, err); deadlineError != nil { @@ -412,15 +403,96 @@ func (r *Runner) HandleSummary(ctx context.Context, summary *lib.Summary) (map[s if err != nil { return nil, fmt.Errorf("unexpected error while generating the summary: %w", err) } + return getSummaryResult(rawResult) } +func prepareHandleSummaryCall( + r *Runner, + legacy *lib.LegacySummary, + summary *lib.Summary, +) (bool, bool, interface{}, string) { + var ( + noColor bool + enableColors bool + summaryDataForJS interface{} + summaryCode string + ) + + // TODO: Remove this code block once we stop supporting the legacy summary. 
+ if legacy != nil { + noColor = legacy.NoColor + enableColors = !legacy.NoColor && legacy.UIState.IsStdOutTTY + summaryDataForJS = summarizeMetricsToObject(legacy, r.Bundle.Options, r.setupData) + summaryCode = jslibSummaryLegacyCode + } + + if summary != nil { + noColor = summary.NoColor + enableColors = !summary.NoColor && summary.UIState.IsStdOutTTY + summaryDataForJS = summary + summaryCode = jslibSummaryCode + } + + return noColor, enableColors, summaryDataForJS, summaryCode +} + +func runUserProvidedHandleSummaryCallback( + summaryCtx context.Context, + vu *VU, + summaryData sobek.Value, +) (sobek.Value, error) { + fn := vu.getExported(consts.HandleSummaryFn) + if fn == nil { + return sobek.Undefined(), nil + } + + handleSummaryFn, ok := sobek.AssertFunction(fn) + if !ok { + return nil, fmt.Errorf("exported identifier %s must be a function", consts.HandleSummaryFn) + } + + callbackResult, _, _, err := vu.runFn(summaryCtx, false, handleSummaryFn, nil, summaryData) + if err != nil { + errText, fields := errext.Format(err) + vu.Runner.preInitState.Logger.WithFields(fields).Error(errText) + } + + // In case of err, we only want to log it, + // but still proceed with the built-in summary handler, so we return nil. + return callbackResult, nil +} + +func prepareHandleWrapperArgs( + vu *VU, + noColor bool, enableColors bool, + callbackResult sobek.Value, + summaryDataForJS interface{}, +) []sobek.Value { + options := map[string]interface{}{ + // TODO: improve when we can easily export all option values, including defaults? 
+ "summaryTrendStats": vu.Runner.Bundle.Options.SummaryTrendStats, + "summaryTimeUnit": vu.Runner.Bundle.Options.SummaryTimeUnit.String, + "noColor": noColor, // TODO: move to the (runtime) options + "enableColors": enableColors, + } + + wrapperArgs := []sobek.Value{ + callbackResult, + vu.Runtime.ToValue(vu.Runner.Bundle.preInitState.RuntimeOptions.SummaryExport.String), + vu.Runtime.ToValue(summaryDataForJS), + vu.Runtime.ToValue(options), + } + + return wrapperArgs +} + func (r *Runner) checkDeadline(ctx context.Context, name string, result sobek.Value, err error) error { if deadline, ok := ctx.Deadline(); !(ok && time.Now().After(deadline)) { return nil } - // deadline is reached so we have timeouted but this might've not been registered correctly + // deadline is reached so we have timed-outed but this might've not been registered correctly // we could have an error that is not context.Canceled in which case we should return it instead //nolint:errorlint if err, ok := err.(*sobek.InterruptedError); ok && result != nil && err.Value() != context.Canceled { diff --git a/internal/js/summary-legacy.js b/internal/js/summary-legacy.js new file mode 100644 index 00000000000..ada659d08e9 --- /dev/null +++ b/internal/js/summary-legacy.js @@ -0,0 +1,426 @@ +/** + * NOTE: This file is a legacy version of the summary generation code, and is kept around for + * backwards compatibility, until we decide to remove the support for the old summary format. + * + * This file contains code used to generate a textual summary of tests results, as displayed + * in the user's terminal at the end of a k6 test run, also known as "end of test summary". + * + * The main entry point is the `generateTextSummary` function, which takes the test data, + * and returns a formatted string summarizing the test results, ready to be written to the terminal. + * + * For convenience, the file also exports the `humanizeValue` function. 
+ */ +exports.humanizeValue = humanizeValue +exports.textSummary = generateTextSummary + +var forEach = function (obj, callback) { + for (var key in obj) { + if (obj.hasOwnProperty(key)) { + if (callback(key, obj[key])) { + break + } + } + } +} + +var palette = { + bold: 1, + faint: 2, + red: 31, + green: 32, + cyan: 36, + //TODO: add others? +} + +var groupPrefix = '█' +var detailsPrefix = '↳' +var succMark = '✓' +var failMark = '✗' +var defaultOptions = { + indent: ' ', + enableColors: true, + summaryTimeUnit: null, + summaryTrendStats: null, +} + +// strWidth tries to return the actual width the string will take up on the +// screen, without any terminal formatting, unicode ligatures, etc. +function strWidth(s) { + // TODO: determine if NFC or NFKD are not more appropriate? or just give up? https://hsivonen.fi/string-length/ + var data = s.normalize('NFKC') // This used to be NFKD in Go, but this should be better + var inEscSeq = false + var inLongEscSeq = false + var width = 0 + for (var char of data) { + if (char.done) { + break + } + + // Skip over ANSI escape codes. 
+ if (char == '\x1b') { + inEscSeq = true + continue + } + if (inEscSeq && char == '[') { + inLongEscSeq = true + continue + } + if (inEscSeq && inLongEscSeq && char.charCodeAt(0) >= 0x40 && char.charCodeAt(0) <= 0x7e) { + inEscSeq = false + inLongEscSeq = false + continue + } + if (inEscSeq && !inLongEscSeq && char.charCodeAt(0) >= 0x40 && char.charCodeAt(0) <= 0x5f) { + inEscSeq = false + continue + } + + if (!inEscSeq && !inLongEscSeq) { + width++ + } + } + return width +} + +function summarizeCheck(indent, check, decorate) { + if (check.fails == 0) { + return decorate(indent + succMark + ' ' + check.name, palette.green) + } + + var succPercent = Math.floor((100 * check.passes) / (check.passes + check.fails)) + return decorate( + indent + + failMark + + ' ' + + check.name + + '\n' + + indent + + ' ' + + detailsPrefix + + ' ' + + succPercent + + '% — ' + + succMark + + ' ' + + check.passes + + ' / ' + + failMark + + ' ' + + check.fails, + palette.red + ) +} + +function summarizeGroup(indent, group, decorate) { + var result = [] + if (group.name != '') { + result.push(indent + groupPrefix + ' ' + group.name + '\n') + indent = indent + ' ' + } + + for (var i = 0; i < group.checks.length; i++) { + result.push(summarizeCheck(indent, group.checks[i], decorate)) + } + if (group.checks.length > 0) { + result.push('') + } + for (var i = 0; i < group.groups.length; i++) { + Array.prototype.push.apply(result, summarizeGroup(indent, group.groups[i], decorate)) + } + + return result +} + +function displayNameForMetric(name) { + var subMetricPos = name.indexOf('{') + if (subMetricPos >= 0) { + return '{ ' + name.substring(subMetricPos + 1, name.length - 1) + ' }' + } + return name +} + +function indentForMetric(name) { + if (name.indexOf('{') >= 0) { + return ' ' + } + return '' +} + +function humanizeBytes(bytes) { + var units = ['B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'] + var base = 1000 + if (bytes < 10) { + return bytes + ' B' + } + + var e = 
Math.floor(Math.log(bytes) / Math.log(base)) + var suffix = units[e | 0] + var val = Math.floor((bytes / Math.pow(base, e)) * 10 + 0.5) / 10 + return val.toFixed(val < 10 ? 1 : 0) + ' ' + suffix +} + +var unitMap = { + s: { unit: 's', coef: 0.001 }, + ms: { unit: 'ms', coef: 1 }, + us: { unit: 'µs', coef: 1000 }, +} + +function toFixedNoTrailingZeros(val, prec) { + // TODO: figure out something better? + return parseFloat(val.toFixed(prec)).toString() +} + +function toFixedNoTrailingZerosTrunc(val, prec) { + var mult = Math.pow(10, prec) + return toFixedNoTrailingZeros(Math.trunc(mult * val) / mult, prec) +} + +function humanizeGenericDuration(dur) { + if (dur === 0) { + return '0s' + } + + if (dur < 0.001) { + // smaller than a microsecond, print nanoseconds + return Math.trunc(dur * 1000000) + 'ns' + } + if (dur < 1) { + // smaller than a millisecond, print microseconds + return toFixedNoTrailingZerosTrunc(dur * 1000, 2) + 'µs' + } + if (dur < 1000) { + // duration is smaller than a second + return toFixedNoTrailingZerosTrunc(dur, 2) + 'ms' + } + + var result = toFixedNoTrailingZerosTrunc((dur % 60000) / 1000, dur > 60000 ? 
0 : 2) + 's' + var rem = Math.trunc(dur / 60000) + if (rem < 1) { + // less than a minute + return result + } + result = (rem % 60) + 'm' + result + rem = Math.trunc(rem / 60) + if (rem < 1) { + // less than an hour + return result + } + return rem + 'h' + result +} + +function humanizeDuration(dur, timeUnit) { + if (timeUnit !== '' && unitMap.hasOwnProperty(timeUnit)) { + return (dur * unitMap[timeUnit].coef).toFixed(2) + unitMap[timeUnit].unit + } + + return humanizeGenericDuration(dur) +} + +function humanizeValue(val, metric, timeUnit) { + if (metric.type == 'rate') { + // Truncate instead of round when decreasing precision to 2 decimal places + return (Math.trunc(val * 100 * 100) / 100).toFixed(2) + '%' + } + + switch (metric.contains) { + case 'data': + return humanizeBytes(val) + case 'time': + return humanizeDuration(val, timeUnit) + default: + return toFixedNoTrailingZeros(val, 6) + } +} + +function nonTrendMetricValueForSum(metric, timeUnit) { + switch (metric.type) { + case 'counter': + return [ + humanizeValue(metric.values.count, metric, timeUnit), + humanizeValue(metric.values.rate, metric, timeUnit) + '/s', + ] + case 'gauge': + return [ + humanizeValue(metric.values.value, metric, timeUnit), + 'min=' + humanizeValue(metric.values.min, metric, timeUnit), + 'max=' + humanizeValue(metric.values.max, metric, timeUnit), + ] + case 'rate': + return [ + humanizeValue(metric.values.rate, metric, timeUnit), + `${metric.values.passes} out of ${metric.values.passes + metric.values.fails}`, + ] + default: + return ['[no data]'] + } +} + +function summarizeMetrics(options, data, decorate) { + var indent = options.indent + ' ' + var result = [] + + var names = [] + var nameLenMax = 0 + + var nonTrendValues = {} + var nonTrendValueMaxLen = 0 + var nonTrendExtras = {} + var nonTrendExtraMaxLens = [0, 0] + + var trendCols = {} + var numTrendColumns = options.summaryTrendStats.length + var trendColMaxLens = new Array(numTrendColumns).fill(0) + forEach(data.metrics, 
function (name, metric) { + names.push(name) + // When calculating widths for metrics, account for the indentation on submetrics. + var displayName = indentForMetric(name) + displayNameForMetric(name) + var displayNameWidth = strWidth(displayName) + if (displayNameWidth > nameLenMax) { + nameLenMax = displayNameWidth + } + + if (metric.type == 'trend') { + var cols = [] + for (var i = 0; i < numTrendColumns; i++) { + var tc = options.summaryTrendStats[i] + var value = metric.values[tc] + if (tc === 'count') { + value = value.toString() + } else { + value = humanizeValue(value, metric, options.summaryTimeUnit) + } + var valLen = strWidth(value) + if (valLen > trendColMaxLens[i]) { + trendColMaxLens[i] = valLen + } + cols[i] = value + } + trendCols[name] = cols + return + } + var values = nonTrendMetricValueForSum(metric, options.summaryTimeUnit) + nonTrendValues[name] = values[0] + var valueLen = strWidth(values[0]) + if (valueLen > nonTrendValueMaxLen) { + nonTrendValueMaxLen = valueLen + } + nonTrendExtras[name] = values.slice(1) + for (var i = 1; i < values.length; i++) { + var extraLen = strWidth(values[i]) + if (extraLen > nonTrendExtraMaxLens[i - 1]) { + nonTrendExtraMaxLens[i - 1] = extraLen + } + } + }) + + // sort all metrics but keep sub metrics grouped with their parent metrics + names.sort(function (metric1, metric2) { + var parent1 = metric1.split('{', 1)[0] + var parent2 = metric2.split('{', 1)[0] + var result = parent1.localeCompare(parent2) + if (result !== 0) { + return result + } + var sub1 = metric1.substring(parent1.length) + var sub2 = metric2.substring(parent2.length) + return sub1.localeCompare(sub2) + }) + + var getData = function (name) { + if (trendCols.hasOwnProperty(name)) { + var cols = trendCols[name] + var tmpCols = new Array(numTrendColumns) + for (var i = 0; i < cols.length; i++) { + tmpCols[i] = + options.summaryTrendStats[i] + + '=' + + decorate(cols[i], palette.cyan) + + ' '.repeat(trendColMaxLens[i] - strWidth(cols[i])) + } + 
return tmpCols.join(' ') + } + + var value = nonTrendValues[name] + var fmtData = decorate(value, palette.cyan) + ' '.repeat(nonTrendValueMaxLen - strWidth(value)) + + var extras = nonTrendExtras[name] + if (extras.length == 1) { + fmtData = fmtData + ' ' + decorate(extras[0], palette.cyan, palette.faint) + } else if (extras.length > 1) { + var parts = new Array(extras.length) + for (var i = 0; i < extras.length; i++) { + parts[i] = + decorate(extras[i], palette.cyan, palette.faint) + + ' '.repeat(nonTrendExtraMaxLens[i] - strWidth(extras[i])) + } + fmtData = fmtData + ' ' + parts.join(' ') + } + + return fmtData + } + + for (var name of names) { + var metric = data.metrics[name] + var mark = ' ' + var markColor = function (text) { + return text + } // noop + + if (metric.thresholds) { + mark = succMark + markColor = function (text) { + return decorate(text, palette.green) + } + forEach(metric.thresholds, function (name, threshold) { + if (!threshold.ok) { + mark = failMark + markColor = function (text) { + return decorate(text, palette.red) + } + return true // break + } + }) + } + var fmtIndent = indentForMetric(name) + var fmtName = displayNameForMetric(name) + fmtName = + fmtName + + decorate( + '.'.repeat(nameLenMax - strWidth(fmtName) - strWidth(fmtIndent) + 3) + ':', + palette.faint + ) + + result.push(indent + fmtIndent + markColor(mark) + ' ' + fmtName + ' ' + getData(name)) + } + + return result +} + +function generateTextSummary(data, options) { + var mergedOpts = Object.assign({}, defaultOptions, data.options, options) + var lines = [] + + // TODO: move all of these functions into an object with methods? 
+ var decorate = function (text) { + return text + } + if (mergedOpts.enableColors) { + decorate = function (text, color /*, ...rest*/) { + var result = '\x1b[' + color + for (var i = 2; i < arguments.length; i++) { + result += ';' + arguments[i] + } + return result + 'm' + text + '\x1b[0m' + } + } + + Array.prototype.push.apply( + lines, + summarizeGroup(mergedOpts.indent + ' ', data.root_group, decorate) + ) + + Array.prototype.push.apply(lines, summarizeMetrics(mergedOpts, data, decorate)) + + return lines.join('\n') +} + diff --git a/internal/js/summary-wrapper.js b/internal/js/summary-wrapper.js index 2ec8f65a5a5..dc0d9e48141 100644 --- a/internal/js/summary-wrapper.js +++ b/internal/js/summary-wrapper.js @@ -1,4 +1,5 @@ (function () { + // TODO: Find a better name, more descriptive more this variable. var jslib = {}; (function (module, exports) { /*JSLIB_SUMMARY_CODE*/; @@ -59,12 +60,11 @@ return JSON.stringify(results, null, 4); }; - return function (summaryCallbackResult, jsonSummaryPath, data) { - var result = summaryCallbackResult; + return function (summaryCallbackResult, jsonSummaryPath, data, options) { + let result = summaryCallbackResult; if (!result) { - var enableColors = (!data.options.noColor && data.state.isStdOutTTY); result = { - 'stdout': '\n' + jslib.textSummary(data, {indent: ' ', enableColors: enableColors}) + '\n\n', + 'stdout': '\n' + jslib.textSummary(data, options) + '\n\n', }; } diff --git a/internal/js/summary.go b/internal/js/summary.go index 86dc34d18bc..1047eb83d7f 100644 --- a/internal/js/summary.go +++ b/internal/js/summary.go @@ -18,6 +18,11 @@ import ( //go:embed summary.js var jslibSummaryCode string +// TODO: Remove me once we stop supporting the legacy summary. 
+// +//go:embed summary-legacy.js +var jslibSummaryLegacyCode string + //go:embed summary-wrapper.js var summaryWrapperLambdaCode string @@ -33,11 +38,7 @@ func metricValueGetter(summaryTrendStats []string) func(metrics.Sink, time.Durat switch sink := sink.(type) { case *metrics.CounterSink: result = sink.Format(t) - rate := 0.0 - if t > 0 { - rate = sink.Value / (float64(t) / float64(time.Second)) - } - result["rate"] = rate + result["rate"] = calculateCounterRate(sink.Value, t) case *metrics.GaugeSink: result = sink.Format(t) result["min"] = sink.Min @@ -59,7 +60,7 @@ func metricValueGetter(summaryTrendStats []string) func(metrics.Sink, time.Durat // summarizeMetricsToObject transforms the summary objects in a way that's // suitable to pass to the JS runtime or export to JSON. -func summarizeMetricsToObject(data *lib.Summary, options lib.Options, setupData []byte) map[string]interface{} { +func summarizeMetricsToObject(data *lib.LegacySummary, options lib.Options, setupData []byte) map[string]interface{} { m := make(map[string]interface{}) m["root_group"] = exportGroup(data.RootGroup) m["options"] = map[string]interface{}{ @@ -159,3 +160,10 @@ func getSummaryResult(rawResult sobek.Value) (map[string]io.Reader, error) { return results, nil } + +func calculateCounterRate(count float64, duration time.Duration) float64 { + if duration == 0 { + return 0 + } + return count / (float64(duration) / float64(time.Second)) +} diff --git a/internal/js/summary.js b/internal/js/summary.js index 93b2a7a95dc..c3cbfda8835 100644 --- a/internal/js/summary.js +++ b/internal/js/summary.js @@ -1,413 +1,1392 @@ -var forEach = function (obj, callback) { - for (var key in obj) { - if (obj.hasOwnProperty(key)) { - if (callback(key, obj[key])) { - break - } - } - } +/** + * This file contains code used to generate a textual summary of tests results, as displayed + * in the user's terminal at the end of a k6 test run, also known as "end of test summary". 
+ * + * The main entry point is the `generateTextSummary` function, which takes the test data as well as a report + * object containing results for checks, metrics, thresholds, groups, and scenarios, and returns a formatted + * string summarizing the test results, ready to be written to the terminal. + * + * For convenience, the file also exports the `humanizeValue` function. + */ +exports.humanizeValue = humanizeValue; +exports.textSummary = generateTextSummary; + +/** + * Generates a textual summary of test results, including checks, metrics, thresholds, groups, and scenarios. + * + * @param {Report} report - The report object containing thresholds, checks, metrics, groups, and scenarios. + * @param {Object} options - Additional options that override defaults. + * @returns {string} A formatted summary of the test results. + */ +function generateTextSummary(report, options) { + const mergedOpts = Object.assign({}, defaultOptions, options); + + // Create a render context holding information such as indentation level to apply + const context = new RenderContext(0); + + // Create a formatter with default settings (colors enabled) + const formatter = new ANSIFormatter(mergedOpts); + + const reportGenerator = new TestReportGenerator( + formatter, + context, + mergedOpts, + ); + + return reportGenerator.generate(report); } -var palette = { - bold: 1, - faint: 2, - red: 31, - green: 32, - cyan: 36, - //TODO: add others? +/** + * Formats a metric value into a human-readable form, depending on the metric type and content. + * + * @param {number} val - The metric value. + * @param {ReportMetric} metric - The metric object. + * @param {string|null} timeUnit - The time unit for duration metrics. + * @returns {string} The humanized metric value. 
+ */ +function humanizeValue(val, metric, timeUnit) { + if (metric.type === 'rate') { + // Truncate instead of round when decreasing precision to 2 decimal places + return (Math.trunc(val * 100 * 100) / 100).toFixed(2) + '%'; + } + + switch (metric.contains) { + case 'data': + return humanizeBytes(val); + case 'time': + return humanizeDuration(val, timeUnit); + default: + return toFixedNoTrailingZeros(val, 6); + } } -var groupPrefix = '█' -var detailsPrefix = '↳' -var succMark = '✓' -var failMark = '✗' -var defaultOptions = { - indent: ' ', - enableColors: true, - summaryTimeUnit: null, - summaryTrendStats: null, +/** + * @typedef {Object} Report + * @property {Record} thresholds - The thresholds report. + * @property {ReportMetrics} metrics - The metrics report. + * @property {Record} groups - The groups report. + * @property {Record} scenarios - The scenarios report. + */ + +/** + * @typedef {Object} ReportThreshold + * @property {string} source - The threshold expression source. + * @property {boolean} ok - Whether the threshold was satisfied or not. + */ + +/** + * @typedef {Object} ReportGroup + * @property {ReportChecks} checks - The checks report. + * @property {ReportMetrics} metrics - The metrics report. + * @property {Record} groups - The nested groups report. + */ + +/** + * @typedef {Object} ReportMetric + * @property {string} name - The name of the reported metric. + * @property {"counter"|"gauge"|"rate"|"trend"} type - The type of the metric. + * @property {"time"|"data"|"default"} contains - The type of data contained in the metric + * @property {Record} values - Key-value pairs of metric statistics (e.g. min, max, avg). + * @property {EngineThreshold[]} [thresholds] - Optional array of thresholds associated with this metric. + */ + +/** + * @typedef {Object} ReportMetrics + * @property {Record} http - The HTTP metrics. + * @property {Record} execution - The execution-related metrics. + * @property {Record} network - The network-related metrics. 
+ * @property {Record} browser - The browser-related metrics.
+ * @property {Record} webvitals - The web vitals metrics.
+ * @property {Record} grpc - The grpc-related metrics.
+ * @property {Record} websocket - The websocket-related metrics.
+ * @property {Record} miscelaneous - The custom metrics. NOTE(review): spelling ("miscelaneous") and the code's special-cased "custom" section key look inconsistent — confirm the actual key emitted by k6.
+ */
+
+/**
+ * @typedef {Object} ReportChecks
+ * @property {Record} metrics - The metrics for checks.
+ * @property {EngineCheck[]} ordered_checks - The ordered checks.
+ */
+
+/**
+ * @typedef {Object} ReportMetricThresholds
+ * @property {ReportMetric} metric - The metric object.
+ * @property {ReportThreshold[]} thresholds - The thresholds for the metric.
+ */
+
+/**
+ * @typedef {Object} ReportData
+ * @property {Record} metrics - Collection of metrics keyed by their names.
+ */
+
+/**
+ * @typedef {Object} EngineCheck
+ * @property {string} id - The check ID.
+ * @property {string} name - The check name.
+ * @property {string} path - The check path.
+ * @property {number} passes - The number of successful checks.
+ * @property {number} fails - The number of failed checks.
+ */
+
+/**
+ * @typedef {Object} EngineThreshold
+ * @property {string} source - The threshold expression source.
+ * @property {boolean} ok - Whether the threshold was satisfied or not.
+ */
+
+/**
+ * @typedef {Object} Options
+ * @property {boolean} [enableColors = true] - Whether to enable ANSI colors.
+ * @property {string | null} [summaryTimeUnit = null] - The time unit for duration metrics.
+ * @property {string[] | null} [summaryTrendStats = null] - The trend statistics to summarize.
+ * @property {boolean} [sortByName = true] - Whether to sort metrics by name.
+ */
+
+/**
+ * Generates a textual summary of test results, including checks, metrics, thresholds, groups, and scenarios.
+ */
+class TestReportGenerator {
+  /**
+   * Constructs a TestReportGenerator with a specified formatter.
+   *
+   * @param {ANSIFormatter} formatter - The ANSI formatter to use for text decoration.
+ * @param {RenderContext} renderContext - The render context to use for text rendering. + * @param {Options} [options = {}] + */ + constructor(formatter, renderContext, options = {}) { + this.formatter = formatter; + this.renderContext = renderContext; + this.options = options; + } + + /** + * Generates a textual summary of test results, including checks, metrics, thresholds, groups, and scenarios. + * + * @param {Report} report - The report object containing thresholds, checks, metrics, groups, and scenarios as provided by k6. + * @returns {string} - A formatted summary of the test results. + */ + generate(report) { + const reportBuilder = new ReportBuilder( + this.formatter, + this.renderContext, + this.options, + ); + return reportBuilder + .addThresholds(report.thresholds) + .addTotalResults(report) + .addGroups(report.groups) + .addScenarios(report.scenarios) + .build(); + } } -// strWidth tries to return the actual width the string will take up on the -// screen, without any terminal formatting, unicode ligatures, etc. -function strWidth(s) { - // TODO: determine if NFC or NFKD are not more appropriate? or just give up? https://hsivonen.fi/string-length/ - var data = s.normalize('NFKC') // This used to be NFKD in Go, but this should be better - var inEscSeq = false - var inLongEscSeq = false - var width = 0 - for (var char of data) { - if (char.done) { - break - } - - // Skip over ANSI escape codes. - if (char == '\x1b') { - inEscSeq = true - continue - } - if (inEscSeq && char == '[') { - inLongEscSeq = true - continue - } - if (inEscSeq && inLongEscSeq && char.charCodeAt(0) >= 0x40 && char.charCodeAt(0) <= 0x7e) { - inEscSeq = false - inLongEscSeq = false - continue - } - if (inEscSeq && !inLongEscSeq && char.charCodeAt(0) >= 0x40 && char.charCodeAt(0) <= 0x5f) { - inEscSeq = false - continue - } - - if (!inEscSeq && !inLongEscSeq) { - width++ - } - } - return width +/** + * Exposes methods for generating a textual summary of test results. 
+ */ +class ReportBuilder { + /** + * Creates a new ReportBuilder with a specified formatter and options. + * + * @param {ANSIFormatter} formatter - The ANSI formatter to use for text decoration. + * @param {RenderContext} renderContext - The render context to use for text rendering. + * @param options + */ + constructor(formatter, renderContext, options) { + this.formatter = formatter; + this.renderContext = renderContext; + this.options = options; + this.sections = []; + } + + /** + * Adds a thresholds section to the report. + * + * @param {Record} thresholds - The thresholds to add to the report. + * @returns {ReportBuilder} + */ + addThresholds(thresholds) { + if (!thresholds || Object.keys(thresholds).length === 0) return this; + + this.sections.push({ + title: 'THRESHOLDS', + content: this._renderThresholds(thresholds), + }); + return this; + } + + /** + * Adds a total results section to the report. + * + * @param {Report} report - The report object containing thresholds, checks, metrics, groups, and scenarios as provided by k6. + * @returns {ReportBuilder} + */ + addTotalResults(report) { + this.sections.push({ + title: 'TOTAL RESULTS', + content: [ + ...this._renderChecks(report.checks), + ...this._renderMetrics(report.metrics), + ], + }); + return this; + } + + /** + * Adds groups sections to the report. + * + * @param {Record} groups - The groups to add to the report. + * @returns {ReportBuilder} + */ + addGroups(groups) { + if (!groups) return this; + + Object.entries(groups) + .sort(([a], [b]) => a.localeCompare(b)) + .forEach(([groupName, groupData]) => { + this.sections.push({ + title: `GROUP: ${groupName}`, + content: this._renderGroupContent(groupData), + }); + }); + return this; + } + + /** + * Adds scenarios sections to the report. + * + * @param {Record} scenarios - The scenarios to add to the report. 
+ * @returns {ReportBuilder}
+ */
+  addScenarios(scenarios) {
+    if (!scenarios) return this;
+
+    Object.entries(scenarios)
+      .sort(([a], [b]) => a.localeCompare(b))
+      .forEach(([scenarioName, scenarioData]) => {
+        this.sections.push({
+          title: `SCENARIO: ${scenarioName}`,
+          content: this._renderScenarioContent(scenarioData),
+        });
+      });
+    return this;
+  }
+
+  /**
+   * Builds the final report by concatenating all sections together, resulting
+   * in a formatted string ready to be printed to the terminal.
+   *
+   * @returns {string}
+   */
+  build() {
+    return this.sections
+      .map((section) => [
+        renderTitle(section.title, this.formatter, this.renderContext),
+        ...section.content,
+        '\n',
+      ])
+      .flat()
+      .reduce((acc, curr) => {
+        return (curr === '\n') ? acc + curr : acc + '\n' + curr;
+      }, '');
+  }
+
+  /**
+   * @param {Record} thresholds
+   * @param {RenderContext} [renderContext]
+   * @returns {string[]}
+   * @private
+   */
+  _renderThresholds(thresholds, renderContext) {
+    // The thresholds list should be indented one level deeper than the title
+    renderContext = renderContext || this.renderContext;
+    renderContext = renderContext.indentedContext(1);
+
+    // Implement threshold rendering logic
+    return renderThresholds(
+      this._processThresholds(thresholds),
+      this.formatter,
+      renderContext,
+      this.options,
+    );
+  }
+
+  /**
+   * @param {ReportChecks} checks - The checks report to render.
+   * @param {RenderContext} [renderContext] - The render context to use for text rendering.
+   * @returns {string[]}
+   * @private
+   */
+  _renderChecks(checks, renderContext) {
+    renderContext = renderContext || this.renderContext;
+
+    return checks
+      ? [...renderChecks(checks, this.formatter, renderContext, this.options), '\n']
+      : [];
+  }
+
+  /**
+   * @param {ReportMetrics} metrics - The metrics to render.
+   * @param {RenderContext} [renderContext] - The render context to use for text rendering.
+ * @returns {string[]} + * @private + */ + _renderMetrics(metrics, renderContext) { + renderContext = renderContext || this.renderContext; + renderContext = renderContext.indentedContext(1); + + // Collect all metrics into a single object, so we can precompute all formatting information + const allMetrics = Object.entries(metrics).reduce((acc, [_, metrics]) => { + Object.assign(acc, metrics); + return acc; + }, {}); + + // Precompute all formatting information + const summaryInfo = computeSummaryInfo( + allMetrics, + renderContext, + this.options, + ); + + // Implement metrics rendering logic + return Object.entries(metrics) + .filter( + ([_, sectionMetrics]) => Object.keys(sectionMetrics).length > 0, + ) + .reduce( + (acc, [sectionName, sectionMetrics]) => (sectionName === "custom") + ? [[sectionName, sectionMetrics], ...acc] + : [...acc, [sectionName, sectionMetrics]] + , []) + .flatMap(([sectionName, sectionMetrics]) => [ + renderContext.indent( + this.formatter.boldify(sectionName.toUpperCase()), + ), + ...renderMetrics( + sectionMetrics, + summaryInfo, + this.formatter, + renderContext, + this.options, + ), + '\n', + ]); + } + + /** + * @param {ReportGroup} group - The group data to render. + * @param {RenderContext} [renderContext] - The render context to use for text rendering. + * @returns {string[]} + * @private + */ + _renderGroupContent(group, renderContext) { + renderContext = renderContext || this.renderContext; + + // Implement group content rendering + return [ + ...this._renderChecks(group.checks, renderContext), + ...this._renderMetrics(group.metrics, renderContext), + ...(group.groups ? this._renderNestedGroups(group.groups) : []), + ]; + } + + /** + * @param {ReportGroup} scenarioData - The scenario data to render. + * @param {RenderContext} [renderContext] - The render context to use for text rendering. 
+ * @returns {string[]} + * @private + */ + _renderScenarioContent(scenarioData, renderContext) { + renderContext = renderContext || this.renderContext; + + // Similar to group content rendering + return [ + ...this._renderChecks(scenarioData.checks, renderContext), + ...this._renderMetrics(scenarioData.metrics, renderContext), + ...(scenarioData.groups + ? this._renderNestedGroups(scenarioData.groups) + : []), + ]; + } + + /** + * @param {Record} groups - The nested groups data to render. + * @param {RenderContext} [renderContext] - The render context to use for text rendering. + * @returns {string[]} + * @private + */ + _renderNestedGroups(groups, renderContext) { + renderContext = renderContext || this.renderContext; + renderContext = renderContext.indentedContext(1); + + // Render nested groups recursively + return Object.entries(groups) + .sort(([a], [b]) => a.localeCompare(b)) + .flatMap(([groupName, groupData]) => [ + renderTitle(`GROUP: ${groupName}`, this.formatter, renderContext, { + prefix: subtitlePrefix, + suffix: '\n', + }), + ...this._renderGroupContent(groupData, renderContext), + ]); + } + + // Private rendering methods + /** + * + * @param {ReportMetricThresholds} thresholds - The thresholds data to render. 
+ * @returns {Record} + * @private + */ + _processThresholds(thresholds) { + // Transform thresholds into a format suitable for rendering + const metrics = {}; + Object.values(thresholds).forEach((threshold) => { + metrics[threshold.metric.name] = { + ...threshold.metric, + thresholds: threshold.thresholds, + }; + }); + return metrics; + } } -function summarizeCheck(indent, check, decorate) { - if (check.fails == 0) { - return decorate(indent + succMark + ' ' + check.name, palette.green) - } - - var succPercent = Math.floor((100 * check.passes) / (check.passes + check.fails)) - return decorate( - indent + - failMark + - ' ' + - check.name + - '\n' + - indent + - ' ' + - detailsPrefix + - ' ' + - succPercent + - '% — ' + - succMark + - ' ' + - check.passes + - ' / ' + - failMark + - ' ' + - check.fails, - palette.red - ) +/** + * RenderContext is a helper class that provides methods for rendering text + * with indentation. + * + * It is used to keep track of the current indentation level and provide + * methods for rendering text with the correct indentation. + * + * It also facilitates the creation of new RenderContext instances with + * different indentation levels. That way the indentation level can be + * easily adjusted relatively to a parent indentation level without having + * to manage some dedicated state manually. + */ +class RenderContext { + constructor(baseIndentationLevel = 0) { + this.baseIndentationLevel = baseIndentationLevel; + } + + /** + * Returns a string of spaces for a given indentation level. + * + * @param {number} [level] + * @returns {string} + */ + indentLevel(level = 1) { + return ' '.repeat((this.baseIndentationLevel + level) * 2); + } + + /** + * @param {string} text - The text to indent. + * @param {number} [level] + * @returns {string} + */ + indent(text, level = 1) { + return this.indentLevel(level) + text; + } + + /** + * indentedContext returns a new RenderContext with an incremented base indentation level. 
+ * + * This allows to easily obtain a new RenderContext from a parent one with an + * increased indentation level. + * + * @param {number} increment - The increment to apply to the base indentation level. + * @returns {RenderContext} + */ + indentedContext(increment = 1) { + return new RenderContext(this.baseIndentationLevel + increment); + } } -function summarizeGroup(indent, group, decorate) { - var result = [] - if (group.name != '') { - result.push(indent + groupPrefix + ' ' + group.name + '\n') - indent = indent + ' ' - } - - for (var i = 0; i < group.checks.length; i++) { - result.push(summarizeCheck(indent, group.checks[i], decorate)) - } - if (group.checks.length > 0) { - result.push('') - } - for (var i = 0; i < group.groups.length; i++) { - Array.prototype.push.apply(result, summarizeGroup(indent, group.groups[i], decorate)) - } - - return result +/** + * ANSIFormatter provides methods for decorating text with ANSI color and style codes. + */ +class ANSIFormatter { + /** + * Constructs an ANSIFormatter with configurable color and styling options + * + * @param {Object} options - Configuration options for formatting + * @param {boolean} [options.enableColors=true] - Whether to enable color output + */ + constructor(options = {}) { + this.options = { + enableColors: true, + ...options, + }; + } + + /** + * Decorates text with ANSI color and style. + * + * @param {string} text - The text to decorate. + * @param {ANSIColor} color - The ANSI color to apply. + * @param {...ANSIStyle} styles - optional additional styles to apply. + * @returns {string} - Decorated text, or plain text if colors are disabled. + */ + decorate(text, color, ...styles) { + if (!this.options.enableColors) { + return text; + } + + const colorCode = ANSIColors[color] || ANSIColors.white; + const styleCodes = styles + .map((style) => ANSIStyles[style]) + .filter(Boolean); + + const fullCodes = styleCodes.length + ? 
[...styleCodes, colorCode].join(';') + : colorCode; + + const fullSequence = `\x1b[${fullCodes}m`; + + return `${fullSequence}${text}\x1b[0m`; + } + + /** + * Applies bold styling to text + * + * @param {string} text - Text to make bold + * @returns {string} Bold text + */ + boldify(text) { + if (!this.options.enableColors) { + return text; + } + return `\u001b[1m${text}\x1b[0m`; + } } -function displayNameForMetric(name) { - var subMetricPos = name.indexOf('{') - if (subMetricPos >= 0) { - return '{ ' + name.substring(subMetricPos + 1, name.length - 1) + ' }' - } - return name +/** + * ANSIColor maps ANSI color names to their respective escape codes. + * + * @typedef {'reset'|'black'|'red'|'green'|'yellow'|'blue'|'magenta'|'cyan'| + * 'white'|'brightRed'|'brightGreen'|'brightYellow'} ANSIColor + * + * @typedef {Record} ANSIColors + */ +const ANSIColors = { + reset: '\x1b[0m', + + // Standard Colors + black: '30', + red: '31', + green: '32', + yellow: '33', + blue: '34', + magenta: '35', + cyan: '36', + white: '37', + + // Bright Colors + brightRed: '91', + brightGreen: '92', + brightYellow: '93', +}; + +/** + * ANSIStyle maps ANSI style names to their respective escape codes. + * + * @typedef {'bold' | 'faint' | 'underline' | 'reversed'} ANSIStyle + * + * @typedef {Record} ANSIStyles + */ +const ANSIStyles = { + bold: '1', + faint: '2', + underline: '4', + reversed: '7', +}; + +/** + * Renders a section title with a specified formatter, indentation level, and options. + * + * For example, a bold section title at first indentation level with a block prefix and newline suffix: + * █ THRESHOLDS + * + * @param {string} title - The section title to render. + * @param {ANSIFormatter} formatter - The ANSI formatter to use for text decoration. + * @param {RenderContext} renderContext - The render context to use for text rendering. + * @param {Options & Object} options - Additional options for rendering the section title. 
+ * @param {string} [options.prefix=titlePrefix] - The prefix to use for the section title. + * @param {string} [options.suffix='\n'] - The suffix to use for the section title. + * @returns {string} - The formatted section title. + */ +function renderTitle( + title, + formatter, + renderContext, + options = {prefix: titlePrefix, suffix: '\n'}, +) { + return renderContext.indent( + `${options.prefix} ${formatter.boldify(title)} ${options.suffix || ''}`, + ); } -function indentForMetric(name) { - if (name.indexOf('{') >= 0) { - return ' ' - } - return '' +/** + * Renders a single check into a formatted line ready for output. + * + * @param {EngineCheck} check - The check object with name, passes and fails + * @param {ANSIFormatter} formatter - ANSI formatter used for decorating text. + * @param {RenderContext} renderContext - The render context to use for text rendering. + * @returns {string} - A formatted line summarizing the check. + */ +function renderCheck(check, formatter, renderContext) { + // If the check was successful, immediately render a green line indicating success + if (check.fails === 0) { + return renderContext.indent( + formatter.decorate(successMark + ' ' + check.name, 'green'), + ); + } + + // Other we want to display both the check name and the percentage of successful checks + // in red, along with the number of passes and fails. 
+ const successfulPct = Math.floor( + (100 * check.passes) / (check.passes + check.fails), + ); + + const checkName = formatter.decorate(failMark + ' ' + check.name, 'red'); + const results = formatter.decorate( + subtitlePrefix + + ' ' + + successfulPct + + '% — ' + + successMark + + ' ' + + check.passes + + ' / ' + + failMark + + ' ' + + check.fails, + 'red', + ); + + return ( + renderContext.indent(checkName) + + '\n' + + renderContext.indent(results, renderContext.baseIndentationLevel + 1) + ); } -function humanizeBytes(bytes) { - var units = ['B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'] - var base = 1000 - if (bytes < 10) { - return bytes + ' B' - } - - var e = Math.floor(Math.log(bytes) / Math.log(base)) - var suffix = units[e | 0] - var val = Math.floor((bytes / Math.pow(base, e)) * 10 + 0.5) / 10 - return val.toFixed(val < 10 ? 1 : 0) + ' ' + suffix +/** + * Renders checks into a formatted set of lines ready for display in the terminal. + * + * @param {ReportChecks} checks + * @param formatter + * @param {RenderContext} renderContext + * @param options + * @returns {*[]} + */ +function renderChecks(checks, formatter, renderContext, options = {}) { + // If no checks exist, return empty array + if (!checks || !checks.ordered_checks) { + return []; + } + + // Add indentation to the render context for checks + renderContext = renderContext.indentedContext(1); + + const {showPassedChecks = true, showFailedChecks = true} = options; + + // Process each check and filter based on options + const renderedChecks = checks.ordered_checks + .filter((check) => { + // Filter logic for passed/failed checks + if (check.fails === 0 && !showPassedChecks) return false; + return !(check.fails > 0 && !showFailedChecks); + }) + .map((check) => renderCheck(check, formatter, renderContext)); + + // Precompute all formatting information + const summaryInfo = computeSummaryInfo( + checks.metrics, + renderContext, + options, + ); + + // Render metrics for checks if they exist + 
const checkMetrics = checks.metrics + ? renderMetrics(checks.metrics, summaryInfo, formatter, renderContext, { + ...options, + sortByName: false, + }) + : []; + + // Combine metrics and checks + return [...checkMetrics, '\n', ...renderedChecks]; } -var unitMap = { - s: { unit: 's', coef: 0.001 }, - ms: { unit: 'ms', coef: 1 }, - us: { unit: 'µs', coef: 1000 }, +/** + * Summarizes metrics into an array of formatted lines ready to be printed to stdout. + * + * @param {Record} metrics - The data object containing metrics. + * @param {SummaryInfo} summaryInfo - An object containing summary information such as maximum name width and trend columns. + * @param {ANSIFormatter} formatter - An ANSIFormatter function for ANSI colors. + * @param {RenderContext} renderContext - The render context to use for text rendering. + * @param {Options} options - Display options merged with defaultOptions. + * @returns {string[]} + */ +function renderMetrics( + metrics, + summaryInfo, + formatter, + renderContext, + options, +) { + // Extract all metric names + let metricNames = Object.keys(metrics); + + // If sorting by name is required, do it now + if (options.sortByName) { + metricNames = sortMetricsByName(metricNames); + } + + // Format each metric line + return metricNames.map((name) => { + const metric = metrics[name]; + return renderMetricLine( + name, + metric, + summaryInfo, + options, + formatter, + renderContext, + ); + }); } -function toFixedNoTrailingZeros(val, prec) { - // TODO: figure out something better? - return parseFloat(val.toFixed(prec)).toString() +/** + * Renders each thresholds results into a formatted set of lines ready for display in the terminal. + * + * @param {Record} metrics - The data object containing metrics. + * @param {ANSIFormatter} formatter - ANSI formatter used for decorating text. + * @param {RenderContext} renderContext - The render context to use for text rendering. + * @param {Object} options - Options merged with defaults. 
+ * @returns {string[]} - Array of formatted lines including threshold statuses. + */ +function renderThresholds(metrics, formatter, renderContext, options) { + // Extract and optionally sort metric names + let metricNames = Object.keys(metrics); + if (options.sortByName) { + metricNames = sortMetricsByName(metricNames); + } + + // Precompute all formatting information + const summaryInfo = computeSummaryInfo( + metrics, + renderContext, + options, + ); + + // Format each threshold line by preparing each metric affected by a threshold, as + // well as the thresholds results for each expression. + const result = []; + for (const name of metricNames) { + const parentName = name.split('{', 1)[0]; + const isSubmetric = name.length > parentName.length; + const parentMetricExists = !!metrics[parentName]; + + const innerContext = (isSubmetric && parentMetricExists) + ? renderContext.indentedContext() + : renderContext; + + const line = renderMetricNameForThresholds( + name, + parentName, + isSubmetric, + parentMetricExists, + innerContext + ); + result.push(line); + + const metric = metrics[name]; + if (metric.thresholds) { + const thresholdLines = renderThresholdResults( + metric, + summaryInfo, + formatter, + innerContext, + ); + result.push(...thresholdLines, '\n'); + } + } + + return result } -function toFixedNoTrailingZerosTrunc(val, prec) { - var mult = Math.pow(10, prec) - return toFixedNoTrailingZeros(Math.trunc(mult * val) / mult, prec) +/** + * Renders each threshold result into a formatted set of lines ready for display in the terminal. + * + * @param {ReportMetric} metric - The metric with the thresholds to render. + * @param {SummaryInfo} summaryInfo - An object containing summary information such as maximum name width and trend columns. + * @param {ANSIFormatter} formatter - ANSIFormatter used for decorating text. + * @param {RenderContext} renderContext - The render context to use for text rendering. 
+ * @returns {string[]} - An array of formatted lines including threshold statuses. + */ +function renderThresholdResults( + metric, + summaryInfo, + formatter, + renderContext, +) { + const lines = []; + + forEach(metric.thresholds, (_, threshold) => { + const isSatisfied = threshold.ok; + const statusText = isSatisfied + ? formatter.decorate(successMark, 'green') + : formatter.decorate(failMark, 'red'); + + const sourceText = formatter.decorate( + `'${threshold.source}'`, + 'white', + ); + + const metricValueText = renderMetricValueForThresholds( + metric, + threshold, + summaryInfo, + formatter, + ) + + // Here we push a line describing the threshold's result + lines.push( + renderContext.indent([statusText, sourceText, metricValueText].join(' ')), + ); + }); + + return lines; } -function humanizeGenericDuration(dur) { - if (dur === 0) { - return '0s' - } - - if (dur < 0.001) { - // smaller than a microsecond, print nanoseconds - return Math.trunc(dur * 1000000) + 'ns' - } - if (dur < 1) { - // smaller than a millisecond, print microseconds - return toFixedNoTrailingZerosTrunc(dur * 1000, 2) + 'µs' - } - if (dur < 1000) { - // duration is smaller than a second - return toFixedNoTrailingZerosTrunc(dur, 2) + 'ms' - } - - var result = toFixedNoTrailingZerosTrunc((dur % 60000) / 1000, dur > 60000 ? 0 : 2) + 's' - var rem = Math.trunc(dur / 60000) - if (rem < 1) { - // less than a minute - return result - } - result = (rem % 60) + 'm' + result - rem = Math.trunc(rem / 60) - if (rem < 1) { - // less than an hour - return result - } - return rem + 'h' + result +/** + * Renders a metric line into a formatted string for display. + * + * @param {string} name - The name of the metric. + * @param {ReportMetric} metric - The metric object containing details about the metric. + * @param {SummaryInfo} info - An object containing summary information such as maximum name width and trend columns. + * @param {Options} options - Configuration options for summarizing metrics. 
+ * @param {ANSIFormatter} formatter - A function to apply ANSI colors to text. + * @param {RenderContext} renderContext - The render context to use for text rendering. + * @returns {string} - The formatted metric line. + */ +function renderMetricLine( + name, + metric, + info, + options, + formatter, + renderContext, +) { + const {maxNameWidth} = info; + + const displayedName = renderMetricDisplayName(name); + const fmtIndent = renderContext.indentLevel(); + + // Compute the trailing dots: + // Use `3` as a spacing offset as per original code. + const dotsCount = + maxNameWidth - strWidth(displayedName) - strWidth(fmtIndent) + 3; + const dottedName = + displayedName + + formatter.decorate('.'.repeat(dotsCount) + ':', 'white', 'faint'); + + const dataPart = + metric.type === 'trend' + ? renderTrendData(name, info, formatter) + : renderNonTrendData(name, info, formatter); + + return renderContext.indent(dottedName + ' ' + dataPart); } -function humanizeDuration(dur, timeUnit) { - if (timeUnit !== '' && unitMap.hasOwnProperty(timeUnit)) { - return (dur * unitMap[timeUnit].coef).toFixed(2) + unitMap[timeUnit].unit - } +/** + * Formats a metric or submetric line for the thresholds' section output. + * + * @param {string} name - The name of the metric + * @param {string} parentName - The name of the parent metric + * @param {boolean} isSubmetric - Whether the metric is a submetric + * @param {boolean} parentMetricExists - In case of submetric, whether the parent metric exists + * @param {RenderContext} renderContext - The render context to use for text rendering. + * @returns {string} - The metric name report line + */ +function renderMetricNameForThresholds( + name, + parentName, + isSubmetric, + parentMetricExists, + renderContext, +) { + // If it's a parent metric, or it's a submetric, + // which parent metric is not included in results, we just print the name. 
+ if (!isSubmetric || !parentMetricExists) { + return renderContext.indent(name); + } - return humanizeGenericDuration(dur) + // Otherwise, we only print the labels. + return renderContext.indent(name.substring(parentName.length)); } -function humanizeValue(val, metric, timeUnit) { - if (metric.type == 'rate') { - // Truncate instead of round when decreasing precision to 2 decimal places - return (Math.trunc(val * 100 * 100) / 100).toFixed(2) + '%' - } - - switch (metric.contains) { - case 'data': - return humanizeBytes(val) - case 'time': - return humanizeDuration(val, timeUnit) - default: - return toFixedNoTrailingZeros(val, 6) - } +/** + * Formats the metric's value for the thresholds' section output. + * + * @param {ReportMetric} metric - The metric for which value will be rendered. + * @param {EngineThreshold} threshold - The threshold to use for rendering. + * @param {SummaryInfo} info - An object containing summary information such as maximum name width and trend columns. + * @param {ANSIFormatter} formatter - ANSIFormatter used for decorating text. + * @returns {string} - The metric's value line in the form: `{agg}={value}` + */ +function renderMetricValueForThresholds( + metric, + threshold, + info, + formatter, +) { + const {trendStats, trendCols, nonTrendValues, nonTrendExtras} = info; + const thresholdAgg = threshold.source.split(/[=><]/)[0].trim(); + + let value; + switch (metric.type) { + case 'trend': + value = trendCols[metric.name][trendStats.indexOf(thresholdAgg)] + break; + case 'counter': + value = (thresholdAgg === 'count') + ? 
nonTrendValues[metric.name] + : nonTrendExtras[metric.name][0]; + break; + default: + value = nonTrendValues[metric.name]; + } + + return [ + formatter.decorate(thresholdAgg, 'white'), + formatter.decorate(value, 'cyan') + ].join('='); } -function nonTrendMetricValueForSum(metric, timeUnit) { - switch (metric.type) { - case 'counter': - return [ - humanizeValue(metric.values.count, metric, timeUnit), - humanizeValue(metric.values.rate, metric, timeUnit) + '/s', - ] - case 'gauge': - return [ - humanizeValue(metric.values.value, metric, timeUnit), - 'min=' + humanizeValue(metric.values.min, metric, timeUnit), - 'max=' + humanizeValue(metric.values.max, metric, timeUnit), - ] - case 'rate': - return [ - humanizeValue(metric.values.rate, metric, timeUnit), - `${metric.values.passes} out of ${metric.values.passes + metric.values.fails}`, - ] - default: - return ['[no data]'] - } +/** + * Format data for trend metrics. + */ +function renderTrendData(name, info, formatter) { + const {trendStats, trendCols, trendColMaxLens} = info; + const cols = trendCols[name]; + + return cols + .map((col, i) => { + const statName = trendStats[i]; + const padding = ' '.repeat(trendColMaxLens[i] - strWidth(col)); + return statName + '=' + formatter.decorate(col, 'cyan') + padding; + }) + .join(' '); +} + +/** + * Format data for non-trend metrics. + * + * @param {string} name - The metric name. + * @param {SummaryInfo} info - The summary information object. + * @param {ANSIFormatter} formatter - A decoration function for ANSI colors. 
+ */ +function renderNonTrendData(name, info, formatter) { + const { + nonTrendValues, + nonTrendExtras, + maxNonTrendValueLen, + nonTrendExtraMaxLens, + } = info; + + const value = nonTrendValues[name]; + const extras = nonTrendExtras[name] || []; + + let result = formatter.decorate(value, 'cyan'); + result += ' '.repeat(maxNonTrendValueLen - strWidth(value)); + + if (extras.length === 1) { + // Single extra value + result += ' ' + formatter.decorate(extras[0], 'cyan', 'faint'); + } else if (extras.length > 1) { + // Multiple extras need their own spacing + const parts = extras.map((val, i) => { + const extraSpace = ' '.repeat( + nonTrendExtraMaxLens[i] - strWidth(val), + ); + return formatter.decorate(val, 'cyan', 'faint') + extraSpace; + }); + result += ' ' + parts.join(' '); + } + + return result; +} + +/** + * + * @param {number} value + * @param {string} stat + * @param {ReportMetric} metric + * @param {Options} options + * @returns {string} + */ +function renderTrendValue(value, stat, metric, options) { + if (stat === 'count') { + return value.toString(); + } + return humanizeValue(value, metric, options.summaryTimeUnit); +} + +/** + * Compute all necessary formatting information such as maximum lengths, trend columns and non-trend values for each + * metric. + * + * @typedef {Object} SummaryInfo + * @property {number} maxNameWidth - The maximum width of the metric names. + * @property {Object} nonTrendValues - The non-trend metric values. + * @property {Object} nonTrendExtras - The non-trend metric extras. + * @property {Object} trendCols - The trend columns. + * @property {number[]} trendColMaxLens - The trend column maximum lengths. + * @property {number} numTrendColumns - The number of trend columns. + * @property {string[]} trendStats - The trend statistics. + * @property {number} maxNonTrendValueLen - The maximum non-trend value length. + * @property {number[]} nonTrendExtraMaxLens - The non-trend extra maximum lengths. 
+ *
+ * @param {Record} metrics - The data object containing metrics.
+ * @param {RenderContext} renderContext - The render context to use for text rendering.
+ * @param {Options} options - Display options merged with defaultOptions.
+ * @returns {SummaryInfo}
+ */
+function computeSummaryInfo(metrics, renderContext, options) {
+  const trendStats = options.summaryTrendStats;
+  const numTrendColumns = trendStats.length;
+
+  const nonTrendValues = {};
+  const nonTrendExtras = {};
+  const trendCols = {};
+
+  let maxNameWidth = 0;
+  let maxNonTrendValueLen = 0;
+  let nonTrendExtraMaxLens = []; // FIXME: rename to clarify these are max display widths, not lengths of "lens"
+
+  // Initialize tracking arrays for trend widths
+  const trendColMaxLens = new Array(numTrendColumns).fill(0);
+
+  let metricNames = Object.keys(metrics);
+  // If sorting by name is required, do it now
+  if (options.sortByName) {
+    metricNames = sortMetricsByName(metricNames);
+  }
+
+  for (const name of metricNames) {
+    const metric = metrics[name];
+    const displayName = renderContext.indent(
+      name + renderMetricDisplayName(name),
+    );
+    maxNameWidth = Math.max(maxNameWidth, strWidth(displayName));
+
+    if (metric.type === 'trend') {
+      const cols = trendStats.map((stat) =>
+        renderTrendValue(metric.values[stat], stat, metric, options),
+      );
+
+      // Compute max column widths
+      cols.forEach((col, index) => {
+        trendColMaxLens[index] = Math.max(
+          trendColMaxLens[index],
+          strWidth(col),
+        );
+      });
+      trendCols[name] = cols;
+    } else {
+      const values = nonTrendMetricValueForSum(
+        metric,
+        options.summaryTimeUnit,
+      );
+      const mainValue = values[0]; // FIXME (@oleiade) we should assert that the index exists here
+      nonTrendValues[name] = mainValue;
+      maxNonTrendValueLen = Math.max(
+        maxNonTrendValueLen,
+        strWidth(mainValue),
+      );
+
+      // FIXME (@oleiade): document precisely what an "extra" value represents here
+ const extras = values.slice(1); + nonTrendExtras[name] = extras; + extras.forEach((value, index) => { + const width = strWidth(value); + if ( + nonTrendExtraMaxLens[index] === undefined || + width > nonTrendExtraMaxLens[index] + ) { + nonTrendExtraMaxLens[index] = width; + } + }); + } + } + + return { + maxNameWidth, + nonTrendValues, + nonTrendExtras, + trendCols, + trendColMaxLens, + numTrendColumns, + trendStats, + maxNonTrendValueLen, + nonTrendExtraMaxLens, + }; +} + +/** + * Sorts metrics by name, keeping sub-metrics grouped with their parent metrics. + * + * @param {string[]} metricNames - The metric names to sort. + * @returns {string[]} - The sorted metric names. + */ +function sortMetricsByName(metricNames) { + metricNames.sort(function (lhsMetricName, rhsMetricName) { + const lhsParent = lhsMetricName.split('{', 1)[0]; + const rhsParent = rhsMetricName.split('{', 1)[0]; + const result = lhsParent.localeCompare(rhsParent); + if (result !== 0) { + return result; + } + const lhsSub = lhsMetricName.substring(lhsParent.length); + const rhsSub = rhsMetricName.substring(rhsParent.length); + return lhsSub.localeCompare(rhsSub); + }); + + return metricNames; +} + +/** + * A simple iteration utility function for objects. 
+ * + * @param {Object} obj - the object to iterate over + * @param {(key: string, value: any) => (boolean|void)} callback - Callback invoked with (key, value) + */ +function forEach(obj, callback) { + for (const key in obj) { + if (obj.hasOwnProperty(key)) { + if (callback(key, obj[key])) { + break; + } + } + } } -function summarizeMetrics(options, data, decorate) { - var indent = options.indent + ' ' - var result = [] - - var names = [] - var nameLenMax = 0 - - var nonTrendValues = {} - var nonTrendValueMaxLen = 0 - var nonTrendExtras = {} - var nonTrendExtraMaxLens = [0, 0] - - var trendCols = {} - var numTrendColumns = options.summaryTrendStats.length - var trendColMaxLens = new Array(numTrendColumns).fill(0) - forEach(data.metrics, function (name, metric) { - names.push(name) - // When calculating widths for metrics, account for the indentation on submetrics. - var displayName = indentForMetric(name) + displayNameForMetric(name) - var displayNameWidth = strWidth(displayName) - if (displayNameWidth > nameLenMax) { - nameLenMax = displayNameWidth - } - - if (metric.type == 'trend') { - var cols = [] - for (var i = 0; i < numTrendColumns; i++) { - var tc = options.summaryTrendStats[i] - var value = metric.values[tc] - if (tc === 'count') { - value = value.toString() - } else { - value = humanizeValue(value, metric, options.summaryTimeUnit) - } - var valLen = strWidth(value) - if (valLen > trendColMaxLens[i]) { - trendColMaxLens[i] = valLen - } - cols[i] = value - } - trendCols[name] = cols - return - } - var values = nonTrendMetricValueForSum(metric, options.summaryTimeUnit) - nonTrendValues[name] = values[0] - var valueLen = strWidth(values[0]) - if (valueLen > nonTrendValueMaxLen) { - nonTrendValueMaxLen = valueLen - } - nonTrendExtras[name] = values.slice(1) - for (var i = 1; i < values.length; i++) { - var extraLen = strWidth(values[i]) - if (extraLen > nonTrendExtraMaxLens[i - 1]) { - nonTrendExtraMaxLens[i - 1] = extraLen - } - } - }) - - // sort all 
metrics but keep sub metrics grouped with their parent metrics - names.sort(function (metric1, metric2) { - var parent1 = metric1.split('{', 1)[0] - var parent2 = metric2.split('{', 1)[0] - var result = parent1.localeCompare(parent2) - if (result !== 0) { - return result - } - var sub1 = metric1.substring(parent1.length) - var sub2 = metric2.substring(parent2.length) - return sub1.localeCompare(sub2) - }) - - var getData = function (name) { - if (trendCols.hasOwnProperty(name)) { - var cols = trendCols[name] - var tmpCols = new Array(numTrendColumns) - for (var i = 0; i < cols.length; i++) { - tmpCols[i] = - options.summaryTrendStats[i] + - '=' + - decorate(cols[i], palette.cyan) + - ' '.repeat(trendColMaxLens[i] - strWidth(cols[i])) - } - return tmpCols.join(' ') - } - - var value = nonTrendValues[name] - var fmtData = decorate(value, palette.cyan) + ' '.repeat(nonTrendValueMaxLen - strWidth(value)) - - var extras = nonTrendExtras[name] - if (extras.length == 1) { - fmtData = fmtData + ' ' + decorate(extras[0], palette.cyan, palette.faint) - } else if (extras.length > 1) { - var parts = new Array(extras.length) - for (var i = 0; i < extras.length; i++) { - parts[i] = - decorate(extras[i], palette.cyan, palette.faint) + - ' '.repeat(nonTrendExtraMaxLens[i] - strWidth(extras[i])) - } - fmtData = fmtData + ' ' + parts.join(' ') - } - - return fmtData - } - - for (var name of names) { - var metric = data.metrics[name] - var mark = ' ' - var markColor = function (text) { - return text - } // noop - - if (metric.thresholds) { - mark = succMark - markColor = function (text) { - return decorate(text, palette.green) - } - forEach(metric.thresholds, function (name, threshold) { - if (!threshold.ok) { - mark = failMark - markColor = function (text) { - return decorate(text, palette.red) - } - return true // break - } - }) - } - var fmtIndent = indentForMetric(name) - var fmtName = displayNameForMetric(name) - fmtName = - fmtName + - decorate( - '.'.repeat(nameLenMax - 
strWidth(fmtName) - strWidth(fmtIndent) + 3) + ':', - palette.faint - ) - - result.push(indent + fmtIndent + markColor(mark) + ' ' + fmtName + ' ' + getData(name)) - } - - return result +const titlePrefix = '█'; +const subtitlePrefix = '↳'; +const successMark = '✓'; +const failMark = '✗'; +const defaultOptions = { + indent: ' ', + enableColors: true, // FIXME (@oleiade): we should ensure we respect this flag + summaryTimeUnit: null, + summaryTrendStats: null, + sortByName: true, +}; + +/** + * Compute the width of a string as displayed in a terminal, excluding ANSI codes, terminal + * formatting, Unicode ligatures, etc. + * + * @param {string} s - The string to measure + * @returns {number} The display width of the string + */ +function strWidth(s) { + // TODO: determine if NFC or NFKD are not more appropriate? or just give up? https://hsivonen.fi/string-length/ + const data = s.normalize('NFKC'); // This used to be NFKD in Go, but this should be better + let inEscSeq = false; + let inLongEscSeq = false; + let width = 0; + for (const char of data) { + if (char.done) { + break; + } + + // Skip over ANSI escape codes. + if (char === '\x1b') { + inEscSeq = true; + continue; + } + if (inEscSeq && char === '[') { + inLongEscSeq = true; + continue; + } + if ( + inEscSeq && + inLongEscSeq && + char.charCodeAt(0) >= 0x40 && + char.charCodeAt(0) <= 0x7e + ) { + inEscSeq = false; + inLongEscSeq = false; + continue; + } + if ( + inEscSeq && + !inLongEscSeq && + char.charCodeAt(0) >= 0x40 && + char.charCodeAt(0) <= 0x5f + ) { + inEscSeq = false; + continue; + } + + if (!inEscSeq && !inLongEscSeq) { + width++; + } + } + return width; } -function generateTextSummary(data, options) { - var mergedOpts = Object.assign({}, defaultOptions, data.options, options) - var lines = [] - - // TODO: move all of these functions into an object with methods? 
- var decorate = function (text) { - return text - } - if (mergedOpts.enableColors) { - decorate = function (text, color /*, ...rest*/) { - var result = '\x1b[' + color - for (var i = 2; i < arguments.length; i++) { - result += ';' + arguments[i] - } - return result + 'm' + text + '\x1b[0m' - } - } - - Array.prototype.push.apply( - lines, - summarizeGroup(mergedOpts.indent + ' ', data.root_group, decorate) - ) - - Array.prototype.push.apply(lines, summarizeMetrics(mergedOpts, data, decorate)) - - return lines.join('\n') +/** + * Extracts a display name for a metric, handling sub-metrics (e.g. "metric{sub}" -> "{ sub }"). + * + * @param {string} name - The metric name. + * @returns {string} - The display name + */ +function renderMetricDisplayName(name) { + const subMetricPos = name.indexOf('{'); + if (subMetricPos >= 0) { + return ' { ' + name.substring(subMetricPos + 1, name.length - 1) + ' }'; + } + return name; +} + +/** + * Converts a number of bytes into a human-readable string with units. + * + * @param {number} bytes - The number of bytes. + * @returns {string} A human-readable string (e.g. "10 kB"). + */ +function humanizeBytes(bytes) { + const units = ['B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']; + const base = 1000; + if (bytes < 10) { + return bytes + ' B'; + } + + const e = Math.floor(Math.log(bytes) / Math.log(base)); + const suffix = units[e | 0]; + const val = Math.floor((bytes / Math.pow(base, e)) * 10 + 0.5) / 10; + return val.toFixed(val < 10 ? 1 : 0) + ' ' + suffix; } -exports.humanizeValue = humanizeValue -exports.textSummary = generateTextSummary \ No newline at end of file +const unitMap = { + s: {unit: 's', coef: 0.001}, + ms: {unit: 'ms', coef: 1}, + us: {unit: 'µs', coef: 1000}, +}; + +/** + * Converts a number to a fixed decimal string, removing trailing zeros. + * + * @param {number} val - The number to convert. + * @param {number} prec - Decimal precision. 
+ * @returns {string} A string representation of the number without trailing zeros. + */ +function toFixedNoTrailingZeros(val, prec) { + return parseFloat(val.toFixed(prec)).toString(); +} + +/** + * Truncates a number to a certain precision without rounding, then removes trailing zeros. + * + * @param {number} val - The number to truncate. + * @param {number} prec - Decimal precision. + * @returns {string} A truncated, not rounded string representation. + */ +function toFixedNoTrailingZerosTrunc(val, prec) { + const mult = Math.pow(10, prec); + return toFixedNoTrailingZeros(Math.trunc(mult * val) / mult, prec); +} + +/** + * Humanizes a duration (in milliseconds) to a human-readable string, + * choosing appropriate units (ns, µs, ms, s, m, h). + * + * @param {number} duration - The duration in milliseconds. + * @returns {string} Human-readable duration (e.g. "2.5ms", "3s", "1m30s"). + */ +function humanizeGenericDuration(duration) { + if (duration === 0) { + return '0s'; + } + + if (duration < 0.001) { + // smaller than a microsecond, print nanoseconds + return Math.trunc(duration * 1000000) + 'ns'; + } + if (duration < 1) { + // smaller than a millisecond, print microseconds + return toFixedNoTrailingZerosTrunc(duration * 1000, 2) + 'µs'; + } + if (duration < 1000) { + // duration is smaller than a second + return toFixedNoTrailingZerosTrunc(duration, 2) + 'ms'; + } + + let fixedDuration = + toFixedNoTrailingZerosTrunc( + (duration % 60000) / 1000, + duration > 60000 ? 0 : 2, + ) + 's'; + let rem = Math.trunc(duration / 60000); + if (rem < 1) { + // less than a minute + return fixedDuration; + } + fixedDuration = (rem % 60) + 'm' + fixedDuration; + rem = Math.trunc(rem / 60); + if (rem < 1) { + // less than an hour + return fixedDuration; + } + return rem + 'h' + fixedDuration; +} + +/** + * Humanizes a duration according to a specified time unit or uses a generic formatting. + * + * @param {number} dur - The duration in milliseconds. 
+ * @param {string|null} timeUnit - Optional time unit (e.g. "ms", "s"). + * @returns {string} A human-readable duration string. + */ +function humanizeDuration(dur, timeUnit) { + if (timeUnit !== '' && unitMap.hasOwnProperty(timeUnit)) { + return ( + (dur * unitMap[timeUnit].coef).toFixed(2) + unitMap[timeUnit].unit + ); + } + + return humanizeGenericDuration(dur); +} + +/** + * Returns the summary values for non-trend metrics (counter, gauge, rate). + * + * @param {ReportMetric} metric - The metric to summarize. + * @param {string|null} timeUnit - The time unit for durations. + * @returns {string[]} - An array of summary values. + */ +function nonTrendMetricValueForSum(metric, timeUnit) { + switch (metric.type) { + case 'counter': + return [ + humanizeValue(metric.values.count, metric, timeUnit), + humanizeValue(metric.values.rate, metric, timeUnit) + '/s', + ]; + case 'gauge': + return [ + humanizeValue(metric.values.value, metric, timeUnit), + 'min=' + humanizeValue(metric.values.min, metric, timeUnit), + 'max=' + humanizeValue(metric.values.max, metric, timeUnit), + ]; + case 'rate': + return [ + humanizeValue(metric.values.rate, metric, timeUnit), + `${metric.values.passes} out of ${metric.values.passes + metric.values.fails}`, + ]; + default: + return ['[no data]']; + } +} diff --git a/internal/js/summary_test.go b/internal/js/summary_test.go index 2a2da6c508f..6ecf7027599 100644 --- a/internal/js/summary_test.go +++ b/internal/js/summary_test.go @@ -49,7 +49,7 @@ func TestTextSummary(t *testing.T) { i, tc := i, tc t.Run(fmt.Sprintf("%d_%v", i, tc.stats), func(t *testing.T) { t.Parallel() - summary := createTestSummary(t) + legacySummary := createTestLegacySummary(t) trendStats, err := json.Marshal(tc.stats) require.NoError(t, err) runner, err := getSimpleRunner( @@ -62,7 +62,7 @@ func TestTextSummary(t *testing.T) { ) require.NoError(t, err) - result, err := runner.HandleSummary(context.Background(), summary) + result, err := 
runner.HandleSummary(context.Background(), legacySummary, nil) require.NoError(t, err) require.Len(t, result, 1) @@ -102,7 +102,7 @@ func TestTextSummaryWithSubMetrics(t *testing.T) { subMetricPost.Name: subMetricPost.Metric, } - summary := &lib.Summary{ + legacySummary := &lib.LegacySummary{ Metrics: metrics, RootGroup: &lib.Group{}, TestRunDuration: time.Second, @@ -116,7 +116,7 @@ func TestTextSummaryWithSubMetrics(t *testing.T) { ) require.NoError(t, err) - result, err := runner.HandleSummary(context.Background(), summary) + result, err := runner.HandleSummary(context.Background(), legacySummary, nil) require.NoError(t, err) require.Len(t, result, 1) @@ -205,9 +205,9 @@ func createTestMetrics(t *testing.T) (map[string]*metrics.Metric, *lib.Group) { return testMetrics, rootG } -func createTestSummary(t *testing.T) *lib.Summary { +func createTestLegacySummary(t *testing.T) *lib.LegacySummary { metrics, rootG := createTestMetrics(t) - return &lib.Summary{ + return &lib.LegacySummary{ Metrics: metrics, RootGroup: rootG, TestRunDuration: time.Second, @@ -306,8 +306,8 @@ func TestOldJSONExport(t *testing.T) { require.NoError(t, err) - summary := createTestSummary(t) - result, err := runner.HandleSummary(context.Background(), summary) + legacySummary := createTestLegacySummary(t) + result, err := runner.HandleSummary(context.Background(), legacySummary, nil) require.NoError(t, err) require.Len(t, result, 2) @@ -576,8 +576,8 @@ func TestRawHandleSummaryData(t *testing.T) { require.NoError(t, err) - summary := createTestSummary(t) - result, err := runner.HandleSummary(context.Background(), summary) + legacySummary := createTestLegacySummary(t) + result, err := runner.HandleSummary(context.Background(), legacySummary, nil) require.NoError(t, err) require.Len(t, result, 2) @@ -611,8 +611,8 @@ func TestRawHandleSummaryDataWithSetupData(t *testing.T) { require.NoError(t, err) runner.SetSetupData([]byte("5")) - summary := createTestSummary(t) - result, err := 
runner.HandleSummary(context.Background(), summary) + legacySummary := createTestLegacySummary(t) + result, err := runner.HandleSummary(context.Background(), legacySummary, nil) require.NoError(t, err) dataWithSetup, err := io.ReadAll(result["dataWithSetup.json"]) require.NoError(t, err) @@ -634,8 +634,8 @@ func TestRawHandleSummaryPromise(t *testing.T) { require.NoError(t, err) runner.SetSetupData([]byte("5")) - summary := createTestSummary(t) - result, err := runner.HandleSummary(context.Background(), summary) + legacySummary := createTestLegacySummary(t) + result, err := runner.HandleSummary(context.Background(), legacySummary, nil) require.NoError(t, err) dataWithSetup, err := io.ReadAll(result["dataWithSetup.json"]) require.NoError(t, err) @@ -659,8 +659,8 @@ func TestWrongSummaryHandlerExportTypes(t *testing.T) { ) require.NoError(t, err) - summary := createTestSummary(t) - _, err = runner.HandleSummary(context.Background(), summary) + legacySummary := createTestLegacySummary(t) + _, err = runner.HandleSummary(context.Background(), legacySummary, nil) require.Error(t, err) }) } @@ -684,8 +684,8 @@ func TestExceptionInHandleSummaryFallsBackToTextSummary(t *testing.T) { require.NoError(t, err) - summary := createTestSummary(t) - result, err := runner.HandleSummary(context.Background(), summary) + legacySummary := createTestLegacySummary(t) + result, err := runner.HandleSummary(context.Background(), legacySummary, nil) require.NoError(t, err) require.Len(t, result, 1) diff --git a/internal/lib/testutils/minirunner/minirunner.go b/internal/lib/testutils/minirunner/minirunner.go index 3f7b48d3cc8..f8b8e985f36 100644 --- a/internal/lib/testutils/minirunner/minirunner.go +++ b/internal/lib/testutils/minirunner/minirunner.go @@ -24,7 +24,7 @@ type MiniRunner struct { Fn func(ctx context.Context, state *lib.State, out chan<- metrics.SampleContainer) error SetupFn func(ctx context.Context, out chan<- metrics.SampleContainer) ([]byte, error) TeardownFn func(ctx 
context.Context, out chan<- metrics.SampleContainer) error - HandleSummaryFn func(context.Context, *lib.Summary) (map[string]io.Reader, error) + HandleSummaryFn func(context.Context, *lib.LegacySummary) (map[string]io.Reader, error) SetupData []byte @@ -108,7 +108,11 @@ func (r *MiniRunner) SetOptions(opts lib.Options) error { } // HandleSummary calls the specified summary callback, if supplied. -func (r *MiniRunner) HandleSummary(ctx context.Context, s *lib.Summary) (map[string]io.Reader, error) { +func (r *MiniRunner) HandleSummary( + ctx context.Context, + s *lib.LegacySummary, + _ *lib.Summary, +) (map[string]io.Reader, error) { if r.HandleSummaryFn != nil { return r.HandleSummaryFn(ctx, s) } diff --git a/js/.eslintignore b/js/.eslintignore new file mode 100644 index 00000000000..d5d55e77cbc --- /dev/null +++ b/js/.eslintignore @@ -0,0 +1,5 @@ +# Ignore everything in this folder +* + +# But the end-of-test summary file +!summary.js diff --git a/js/.eslintrc b/js/.eslintrc new file mode 100644 index 00000000000..81c27781453 --- /dev/null +++ b/js/.eslintrc @@ -0,0 +1,90 @@ +{ + "parserOptions": { + "ecmaFeatures": { + "jsx": true + }, + "ecmaVersion": 2020, + "sourceType": "module" + }, + "extends": ["airbnb", "prettier"], + "plugins": ["react", "prettier"], + "env": { + "browser": true, + "node": true + }, + "rules": { + "prettier/prettier": ["error"], + "arrow-body-style": "warn", + "camelcase": 0, + "object-curly-newline": 0, + "operator-linebreak": 0, + "no-shadow": 0, + "max-len": [2, 120], + "no-underscore-dangle": "off", + "react/prop-types": 0, + "react/function-component-definition": 0, + "react/no-unstable-nested-components": 0, + "react/jsx-curly-brace-presence": [ + 2, + { + "props": "ignore", + "children": "never" + } + ], + "react/jsx-tag-spacing": [ + 2, + { + "closingSlash": "never", + "beforeSelfClosing": "always", + "afterOpening": "allow-multiline", + "beforeClosing": "never" + } + ], + "react/jsx-filename-extension": [ + 2, + { + 
"extensions": [".js"] + } + ], + "react/no-array-index-key": 0, + "react/jsx-one-expression-per-line": 0, + "react/jsx-props-no-spreading": 0, + "react/jsx-wrap-multilines": 0, + "import/no-extraneous-dependencies": [ + "warn", + { "devDependencies": false, "peerDependencies": true } + ], + "import/order": [ + "warn", + { + "alphabetize": { + "order": "asc" /* sort in ascending order. Options: ['ignore', 'asc', 'desc'] */, + "caseInsensitive": true /* ignore case. Options: [true, false] */ + }, + "newlines-between": "always" + } + ], + "import/no-unresolved": [ + 2, + { + "ignore": [ + "components", + "hooks", + "images", + "layouts", + "pages", + "styles", + "svg", + "templates", + "utils", + "contexts", + "i18n", + "data" + ] + } + ], + "import/prefer-default-export": 0, + "jsx-a11y/html-has-lang": 0, + "jsx-a11y/control-has-associated-label": 0 + } +} diff --git a/js/.prettierignore b/js/.prettierignore new file mode 100644 index 00000000000..d5d55e77cbc --- /dev/null +++ b/js/.prettierignore @@ -0,0 +1,5 @@ +# Ignore everything in this folder +* + +# But the end-of-test summary file +!summary.js diff --git a/js/.prettierrc b/js/.prettierrc new file mode 100644 index 00000000000..9b25e806cb8 --- /dev/null +++ b/js/.prettierrc @@ -0,0 +1,4 @@ +{ + "singleQuote": true, + "trailingComma": "all" +} diff --git a/lib/models.go b/lib/models.go index 64f4e0f0335..41bc34e786a 100644 --- a/lib/models.go +++ b/lib/models.go @@ -207,7 +207,7 @@ type Check struct { Fails int64 `json:"fails"` } -// NewCheck creates a new check with the given name and parent group. The group may not be nil. +// NewCheck creates a new check with the given name and parent group. The group must not be nil. 
func NewCheck(name string, group *Group) (*Check, error) { if strings.Contains(name, GroupSeparator) { return nil, ErrNameContainsGroupSeparator diff --git a/lib/runner.go b/lib/runner.go index 3d840f22b79..9afdff91116 100644 --- a/lib/runner.go +++ b/lib/runner.go @@ -3,7 +3,6 @@ package lib import ( "context" "io" - "time" "go.k6.io/k6/metrics" ) @@ -81,7 +80,7 @@ type Runner interface { // function in the script. IsExecutable(string) bool - HandleSummary(context.Context, *Summary) (map[string]io.Reader, error) + HandleSummary(context.Context, *LegacySummary, *Summary) (map[string]io.Reader, error) } // UIState describes the state of the UI, which might influence what @@ -90,12 +89,3 @@ type UIState struct { IsStdOutTTY bool IsStdErrTTY bool } - -// Summary contains all of the data the summary handler gets. -type Summary struct { - Metrics map[string]*metrics.Metric - RootGroup *Group - TestRunDuration time.Duration // TODO: use lib.ExecutionState-based interface instead? - NoColor bool // TODO: drop this when noColor is part of the (runtime) options - UIState UIState -} diff --git a/lib/runtime_options.go b/lib/runtime_options.go index b82acc38db5..f4287a96f52 100644 --- a/lib/runtime_options.go +++ b/lib/runtime_options.go @@ -41,6 +41,7 @@ type RuntimeOptions struct { NoThresholds null.Bool `json:"noThresholds"` NoSummary null.Bool `json:"noSummary"` + SummaryMode null.String `json:"summaryMode"` SummaryExport null.String `json:"summaryExport"` KeyWriter null.String `json:"-"` TracesOutput null.String `json:"tracesOutput"` diff --git a/lib/summary.go b/lib/summary.go new file mode 100644 index 00000000000..c0996637894 --- /dev/null +++ b/lib/summary.go @@ -0,0 +1,298 @@ +package lib + +import ( + "errors" + "time" + + "go.k6.io/k6/metrics" +) + +// A SummaryMode specifies the mode of the Summary, +// which defines how the end-of-test summary will be rendered. +type SummaryMode int + +// Possible values for SummaryMode. 
+const (
+	SummaryModeCompact = SummaryMode(iota) // Compact mode that only displays the total results.
+	SummaryModeFull                        // Extended mode that displays total and partial results.
+	SummaryModeLegacy                      // Legacy mode, used for backwards compatibility.
+)
+
+// ErrInvalidSummaryMode indicates the serialized summary mode is invalid.
+var ErrInvalidSummaryMode = errors.New("invalid summary mode")
+
+const (
+	summaryCompactString = "compact"
+	summaryFullString    = "full"
+	summaryLegacyString  = "legacy"
+)
+
+// MarshalJSON serializes a SummaryMode as a human readable string.
+func (m SummaryMode) MarshalJSON() ([]byte, error) {
+	txt, err := m.MarshalText()
+	if err != nil {
+		return nil, err
+	}
+	return []byte(`"` + string(txt) + `"`), nil
+}
+
+// MarshalText serializes a SummaryMode as a human readable string.
+func (m SummaryMode) MarshalText() ([]byte, error) {
+	switch m {
+	case SummaryModeCompact:
+		return []byte(summaryCompactString), nil
+	case SummaryModeFull:
+		return []byte(summaryFullString), nil
+	case SummaryModeLegacy:
+		return []byte(summaryLegacyString), nil
+	default:
+		return nil, ErrInvalidSummaryMode
+	}
+}
+
+// UnmarshalText deserializes a SummaryMode from a string representation.
+func (m *SummaryMode) UnmarshalText(data []byte) error {
+	switch string(data) {
+	case summaryCompactString:
+		*m = SummaryModeCompact
+	case summaryFullString:
+		*m = SummaryModeFull
+	case summaryLegacyString:
+		*m = SummaryModeLegacy
+	default:
+		return ErrInvalidSummaryMode
+	}
+
+	return nil
+}
+
+func (m SummaryMode) String() string {
+	switch m {
+	case SummaryModeCompact:
+		return summaryCompactString
+	case SummaryModeFull:
+		return summaryFullString
+	case SummaryModeLegacy:
+		return summaryLegacyString
+	default:
+		return "[INVALID]"
+	}
+}
+
+// ValidateSummaryMode checks if the provided val is a valid summary mode.
+func ValidateSummaryMode(val string) (sm SummaryMode, err error) {
+	if val == "" {
+		return SummaryModeCompact, nil
+	}
+	if err = sm.UnmarshalText([]byte(val)); err != nil {
+		return 0, err
+	}
+	return
+}
+
+// Summary is the data structure that holds all the summary data (thresholds, metrics, checks, etc)
+// as well as some other information, like certain rendering options.
+type Summary struct {
+	SummaryThresholds `js:"thresholds"`
+	SummaryGroup
+	Scenarios map[string]SummaryGroup
+
+	TestRunDuration time.Duration // TODO: use lib.ExecutionState-based interface instead?
+	NoColor         bool          // TODO: drop this when noColor is part of the (runtime) options
+	UIState         UIState
+}
+
+// NewSummary instantiates a new empty Summary.
+func NewSummary() *Summary {
+	return &Summary{
+		SummaryThresholds: NewSummaryThresholds(),
+		SummaryGroup: SummaryGroup{
+			Metrics: NewSummaryMetrics(),
+			Groups:  make(map[string]SummaryGroup),
+		},
+		Scenarios: make(map[string]SummaryGroup),
+	}
+}
+
+// SummaryMetricInfo holds the definition of a metric that will be rendered in the summary,
+// including the name of the metric, its type (Counter, Trend, etc.) and what it contains (data amounts, times, etc.).
+type SummaryMetricInfo struct { + Name string + Type string + Contains string +} + +// SummaryMetric holds all the information needed to display a metric in the summary, +// including its definition and its values. +type SummaryMetric struct { + SummaryMetricInfo + Values map[string]float64 +} + +// NewSummaryMetricFrom instantiates a new SummaryMetric for a given metrics.Sink and the metric's info. +func NewSummaryMetricFrom( + info SummaryMetricInfo, sink metrics.Sink, + testDuration time.Duration, summaryTrendStats []string, +) SummaryMetric { + getMetricValues := metricValueGetter(summaryTrendStats) + + return SummaryMetric{ + SummaryMetricInfo: info, + Values: getMetricValues(sink, testDuration), + } +} + +// SummaryMetrics is a collection of SummaryMetric grouped by section (http, network, etc). +type SummaryMetrics struct { + // HTTP contains summary data specific to HTTP metrics and is used + // to produce the summary HTTP subsection's content. + HTTP map[string]SummaryMetric + // Execution contains summary data specific to Execution metrics and is used + // to produce the summary Execution subsection's content. + Execution map[string]SummaryMetric + // Network contains summary data specific to Network metrics and is used + // to produce the summary Network subsection's content. + Network map[string]SummaryMetric + + Browser map[string]SummaryMetric + + WebVitals map[string]SummaryMetric + + Grpc map[string]SummaryMetric + + WebSocket map[string]SummaryMetric `js:"websocket"` + + // Custom contains user-defined metric results as well as extensions metrics + Custom map[string]SummaryMetric +} + +// NewSummaryMetrics instantiates an empty collection of SummaryMetrics. 
+func NewSummaryMetrics() SummaryMetrics { + return SummaryMetrics{ + HTTP: make(map[string]SummaryMetric), + Execution: make(map[string]SummaryMetric), + Network: make(map[string]SummaryMetric), + Browser: make(map[string]SummaryMetric), + WebVitals: make(map[string]SummaryMetric), + Grpc: make(map[string]SummaryMetric), + WebSocket: make(map[string]SummaryMetric), + Custom: make(map[string]SummaryMetric), + } +} + +// SummaryChecksMetrics is the subset of checks-specific metrics. +type SummaryChecksMetrics struct { + Total SummaryMetric `js:"checks_total"` + Success SummaryMetric `js:"checks_succeeded"` + Fail SummaryMetric `js:"checks_failed"` +} + +// SummaryChecks holds the checks information to be rendered in the summary. +type SummaryChecks struct { + Metrics SummaryChecksMetrics + OrderedChecks []*Check +} + +// NewSummaryChecks instantiates an empty set of SummaryChecks. +func NewSummaryChecks() *SummaryChecks { + initChecksMetricData := func(name string, t metrics.MetricType) SummaryMetric { + return SummaryMetric{ + SummaryMetricInfo: SummaryMetricInfo{ + Name: name, + Type: t.String(), + Contains: metrics.Default.String(), + }, + Values: make(map[string]float64), + } + } + + return &SummaryChecks{ + Metrics: SummaryChecksMetrics{ + Total: initChecksMetricData("checks_total", metrics.Counter), + Success: initChecksMetricData("checks_succeeded", metrics.Rate), + Fail: initChecksMetricData("checks_failed", metrics.Rate), + }, + } +} + +// SummaryThreshold holds the information of a threshold to be rendered in the summary. +type SummaryThreshold struct { + Source string `js:"source"` + Ok bool `js:"ok"` +} + +// MetricThresholds is the collection of SummaryThreshold that belongs to the same metric. +type MetricThresholds struct { + Metric SummaryMetric `js:"metric"` + Thresholds []SummaryThreshold `js:"thresholds"` +} + +// SummaryThresholds is a collection of MetricThresholds that will be rendered in the summary. 
+type SummaryThresholds map[string]MetricThresholds + +// NewSummaryThresholds instantiates an empty collection of SummaryThresholds. +func NewSummaryThresholds() SummaryThresholds { + thresholds := make(SummaryThresholds) + return thresholds +} + +// SummaryGroup is a group of metrics and subgroups (recursive) that will be rendered in the summary. +type SummaryGroup struct { + Checks *SummaryChecks // Not always present, thus we use a pointer. + Metrics SummaryMetrics + Groups map[string]SummaryGroup +} + +// NewSummaryGroup instantiates an empty SummaryGroup. +func NewSummaryGroup() SummaryGroup { + return SummaryGroup{ + Metrics: NewSummaryMetrics(), + Groups: make(map[string]SummaryGroup), + } +} + +func metricValueGetter(summaryTrendStats []string) func(metrics.Sink, time.Duration) map[string]float64 { + trendResolvers, err := metrics.GetResolversForTrendColumns(summaryTrendStats) + if err != nil { + panic(err.Error()) // this should have been validated already + } + + return func(sink metrics.Sink, t time.Duration) (result map[string]float64) { + switch sink := sink.(type) { + case *metrics.CounterSink: + result = sink.Format(t) + result["rate"] = calculateCounterRate(sink.Value, t) + case *metrics.GaugeSink: + result = sink.Format(t) + result["min"] = sink.Min + result["max"] = sink.Max + case *metrics.RateSink: + result = sink.Format(t) + result["passes"] = float64(sink.Trues) + result["fails"] = float64(sink.Total - sink.Trues) + case *metrics.TrendSink: + result = make(map[string]float64, len(summaryTrendStats)) + for _, col := range summaryTrendStats { + result[col] = trendResolvers[col](sink) + } + } + + return result + } +} + +func calculateCounterRate(count float64, duration time.Duration) float64 { + if duration == 0 { + return 0 + } + return count / (float64(duration) / float64(time.Second)) +} + +// LegacySummary contains all the data the summary handler gets. 
+type LegacySummary struct { + Metrics map[string]*metrics.Metric + RootGroup *Group + TestRunDuration time.Duration // TODO: use lib.ExecutionState-based interface instead? + NoColor bool // TODO: drop this when noColor is part of the (runtime) options + UIState UIState +} diff --git a/output/summary/data.go b/output/summary/data.go new file mode 100644 index 00000000000..d17efcf0303 --- /dev/null +++ b/output/summary/data.go @@ -0,0 +1,355 @@ +package summary + +import ( + "strings" + "sync/atomic" + "time" + + "go.k6.io/k6/lib" + "go.k6.io/k6/metrics" +) + +type dataModel struct { + thresholds + aggregatedGroupData + scenarios map[string]aggregatedGroupData +} + +func newDataModel() dataModel { + return dataModel{ + aggregatedGroupData: newAggregatedGroupData(), + scenarios: make(map[string]aggregatedGroupData), + } +} + +func (d *dataModel) groupDataFor(scenario string) aggregatedGroupData { + if groupData, exists := d.scenarios[scenario]; exists { + return groupData + } + d.scenarios[scenario] = newAggregatedGroupData() + return d.scenarios[scenario] +} + +func (d *dataModel) storeThresholdsFor(m *metrics.Metric) { + for _, threshold := range m.Thresholds.Thresholds { + d.thresholds = append(d.thresholds, struct { + *metrics.Threshold + Metric *metrics.Metric + }{Metric: m, Threshold: threshold}) + } +} + +type thresholds []struct { + *metrics.Threshold + Metric *metrics.Metric +} + +type aggregatedGroupData struct { + checks *aggregatedChecksData + aggregatedMetrics aggregatedMetricData + groupsData map[string]aggregatedGroupData +} + +func newAggregatedGroupData() aggregatedGroupData { + return aggregatedGroupData{ + checks: newAggregatedChecksData(), + aggregatedMetrics: make(map[string]aggregatedMetric), + groupsData: make(map[string]aggregatedGroupData), + } +} + +func (a aggregatedGroupData) groupDataFor(group string) aggregatedGroupData { + if groupData, exists := a.groupsData[group]; exists { + return groupData + } + a.groupsData[group] = 
newAggregatedGroupData()
+	return a.groupsData[group]
+}
+
+// addSample differs from relayMetricFrom in that it updates the internally stored metric sink with the sample,
+// which differs from the original metric sink, while relayMetricFrom stores the metric and the metric sink from
+// the sample.
+func (a aggregatedGroupData) addSample(sample metrics.Sample) {
+	a.aggregatedMetrics.addSample(sample)
+
+	checkName, hasCheckTag := sample.Tags.Get(metrics.TagCheck.String())
+	if hasCheckTag && sample.Metric.Name == metrics.ChecksName {
+		check := a.checks.checkFor(checkName)
+		if sample.Value == 0 {
+			atomic.AddInt64(&check.Fails, 1)
+		} else {
+			atomic.AddInt64(&check.Passes, 1)
+		}
+	}
+}
+
+// aggregatedMetricData is a container that can either hold a reference to a k6 metric stored in the registry, or
+// hold a pointer to such metric but keeping a separated Sink of values in order to keep an aggregated view of the
+// metric values. The latter is useful for tracking aggregated metric values specific to a group or scenario.
+type aggregatedMetricData map[string]aggregatedMetric
+
+// relayMetricFrom stores the metric and the metric sink from the sample. It makes the underlying metric of our
+// summary's aggregatedMetricData point directly to a metric in the k6 registry, and relies on that specific
+// pointed-at metric's internal state for its computations.
+func (a aggregatedMetricData) relayMetricFrom(sample metrics.Sample) {
+	a[sample.Metric.Name] = aggregatedMetric{
+		Metric: sample.Metric,
+		Sink:   sample.Metric.Sink,
+	}
+}
+
+// addSample stores the value of the sample in a separate internal sink completely detached from the underlying metrics.
+// This allows keeping an aggregated view of the values specific to a group or scenario.
+func (a aggregatedMetricData) addSample(sample metrics.Sample) { + if _, exists := a[sample.Metric.Name]; !exists { + a[sample.Metric.Name] = newAggregatedMetric(sample.Metric) + } + + a[sample.Metric.Name].Sink.Add(sample) +} + +// FIXME (@joan): rename this to make it explicit this is different from an actual k6 metric, and this is used +// only to keep an aggregated view of specific metric-check-group-scenario-thresholds set of values. +type aggregatedMetric struct { + // FIXME (@joan): Drop this and replace it with a concrete copy of the metric data we want to track + // to avoid any potential confusion. + Metric *metrics.Metric + + // FIXME (@joan): Introduce our own way of tracking thresholds, and whether they're crossed or not. + // Without relying on the internal submetrics the engine maintains specifically for thresholds. + // Thresholds []OurThreshold // { crossed: boolean } + + Sink metrics.Sink +} + +func newAggregatedMetric(metric *metrics.Metric) aggregatedMetric { + return aggregatedMetric{ + Metric: metric, + Sink: metrics.NewSink(metric.Type), + } +} + +type aggregatedChecksData struct { + checks map[string]*lib.Check + orderedChecks []*lib.Check +} + +func newAggregatedChecksData() *aggregatedChecksData { + return &aggregatedChecksData{ + checks: make(map[string]*lib.Check), + orderedChecks: make([]*lib.Check, 0), + } +} + +func (a *aggregatedChecksData) checkFor(name string) *lib.Check { + check, ok := a.checks[name] + if !ok { + var err error + check, err = lib.NewCheck(name, &lib.Group{}) // FIXME: Do we really need the group? + if err != nil { + panic(err) // This should never happen + } + a.checks[name] = check + a.orderedChecks = append(a.orderedChecks, check) + } + return check +} + +func populateSummaryGroup( + summaryGroup *lib.SummaryGroup, + groupData aggregatedGroupData, + testRunDuration time.Duration, + summaryTrendStats []string, +) { + // First, we populate the checks metrics, which are treated independently. 
+ populateSummaryChecks(summaryGroup, groupData, testRunDuration, summaryTrendStats) + + // Then, we store the metrics. + storeMetric := func( + dest lib.SummaryMetrics, + info lib.SummaryMetricInfo, + sink metrics.Sink, + testDuration time.Duration, + summaryTrendStats []string, + ) { + summaryMetric := lib.NewSummaryMetricFrom(info, sink, testDuration, summaryTrendStats) + + switch { + case isSkippedMetric(info.Name): + // Do nothing, just skip. + case isHTTPMetric(info.Name): + dest.HTTP[info.Name] = summaryMetric + case isExecutionMetric(info.Name): + dest.Execution[info.Name] = summaryMetric + case isNetworkMetric(info.Name): + dest.Network[info.Name] = summaryMetric + case isBrowserMetric(info.Name): + dest.Browser[info.Name] = summaryMetric + case isGrpcMetric(info.Name): + dest.Grpc[info.Name] = summaryMetric + case isWebSocketsMetric(info.Name): + dest.WebSocket[info.Name] = summaryMetric + case isWebVitalsMetric(info.Name): + dest.WebVitals[info.Name] = summaryMetric + default: + dest.Custom[info.Name] = summaryMetric + } + } + + for _, metricData := range groupData.aggregatedMetrics { + storeMetric( + summaryGroup.Metrics, + lib.SummaryMetricInfo{ + Name: metricData.Metric.Name, + Type: metricData.Metric.Type.String(), + Contains: metricData.Metric.Contains.String(), + }, + metricData.Sink, + testRunDuration, + summaryTrendStats, + ) + } + + // Finally, we keep moving down the hierarchy and populate the nested groups. 
+	for groupName, subGroupData := range groupData.groupsData {
+		summarySubGroup := lib.NewSummaryGroup()
+		populateSummaryGroup(&summarySubGroup, subGroupData, testRunDuration, summaryTrendStats)
+		summaryGroup.Groups[groupName] = summarySubGroup
+	}
+}
+
+func summaryThresholds(
+	thresholds thresholds,
+	testRunDuration time.Duration,
+	summaryTrendStats []string,
+) lib.SummaryThresholds {
+	rts := make(map[string]lib.MetricThresholds, len(thresholds))
+	for _, threshold := range thresholds {
+		metric := threshold.Metric
+
+		mt, exists := rts[metric.Name]
+		if !exists {
+			mt = lib.MetricThresholds{
+				Metric: lib.NewSummaryMetricFrom(
+					lib.SummaryMetricInfo{
+						Name:     metric.Name,
+						Type:     metric.Type.String(),
+						Contains: metric.Contains.String(),
+					},
+					metric.Sink,
+					testRunDuration,
+					summaryTrendStats,
+				),
+			}
+		}
+
+		mt.Thresholds = append(mt.Thresholds, lib.SummaryThreshold{
+			Source: threshold.Source,
+			Ok:     !threshold.LastFailed,
+		})
+		rts[metric.Name] = mt
+	}
+	return rts
+}
+
+// FIXME: This function is a bit convoluted; we should consider refactoring it.
+// For instance, it would be possible to directly construct these metrics on-the-fly.
+func populateSummaryChecks( + summaryGroup *lib.SummaryGroup, + groupData aggregatedGroupData, + testRunDuration time.Duration, + summaryTrendStats []string, +) { + checksMetric, exists := groupData.aggregatedMetrics[metrics.ChecksName] + if !exists { + return + } + + summaryGroup.Checks = lib.NewSummaryChecks() + + totalChecks := float64(checksMetric.Sink.(*metrics.RateSink).Total) //nolint:forcetypeassert + successChecks := float64(checksMetric.Sink.(*metrics.RateSink).Trues) //nolint:forcetypeassert + + summaryGroup.Checks.Metrics.Total.Values["count"] = totalChecks + summaryGroup.Checks.Metrics.Total.Values["rate"] = calculateCounterRate(totalChecks, testRunDuration) + + summaryGroup.Checks.Metrics.Success = lib.NewSummaryMetricFrom( + lib.SummaryMetricInfo{ + Name: "checks_succeeded", + Type: checksMetric.Metric.Type.String(), + Contains: checksMetric.Metric.Contains.String(), + }, + checksMetric.Sink, + testRunDuration, + summaryTrendStats, + ) + + summaryGroup.Checks.Metrics.Fail.Values["passes"] = totalChecks - successChecks + summaryGroup.Checks.Metrics.Fail.Values["fails"] = successChecks + summaryGroup.Checks.Metrics.Fail.Values["rate"] = (totalChecks - successChecks) / totalChecks + + summaryGroup.Checks.OrderedChecks = groupData.checks.orderedChecks +} + +func isHTTPMetric(metricName string) bool { + return oneOfMetrics(metricName, + metrics.HTTPReqsName, + metrics.HTTPReqFailedName, + metrics.HTTPReqDurationName, + metrics.HTTPReqBlockedName, + metrics.HTTPReqConnectingName, + metrics.HTTPReqTLSHandshakingName, + metrics.HTTPReqSendingName, + metrics.HTTPReqWaitingName, + metrics.HTTPReqReceivingName, + ) +} + +func isExecutionMetric(metricName string) bool { + return oneOfMetrics(metricName, metrics.VUsName, + metrics.VUsMaxName, + metrics.IterationsName, + metrics.IterationDurationName, + metrics.DroppedIterationsName, + ) +} + +func isNetworkMetric(metricName string) bool { + return oneOfMetrics(metricName, metrics.DataSentName, 
metrics.DataReceivedName)
+}
+
+func isBrowserMetric(metricName string) bool {
+	return strings.HasPrefix(metricName, "browser_") && !isWebVitalsMetric(metricName)
+}
+
+func isWebVitalsMetric(metricName string) bool {
+	return strings.HasPrefix(metricName, "browser_web_vital_")
+}
+
+func isGrpcMetric(metricName string) bool {
+	return strings.HasPrefix(metricName, "grpc_")
+}
+
+func isWebSocketsMetric(metricName string) bool {
+	return strings.HasPrefix(metricName, "ws_")
+}
+
+func isSkippedMetric(metricName string) bool {
+	return oneOfMetrics(metricName, metrics.ChecksName, metrics.GroupDurationName)
+}
+
+func oneOfMetrics(metricName string, values ...string) bool {
+	for _, v := range values {
+		if strings.HasPrefix(metricName, v) {
+			return true
+		}
+	}
+	return false
+}
+
+func calculateCounterRate(count float64, duration time.Duration) float64 {
+	if duration == 0 {
+		return 0
+	}
+	return count / (float64(duration) / float64(time.Second))
+}
diff --git a/output/summary/doc.go b/output/summary/doc.go
new file mode 100644
index 00000000000..8cb8857456b
--- /dev/null
+++ b/output/summary/doc.go
@@ -0,0 +1,4 @@
+/*
+Package summary implements an output that collects metrics to be displayed in the end-of-test summary
+*/
+package summary
diff --git a/output/summary/summary.go b/output/summary/summary.go
new file mode 100644
index 00000000000..17c10a8cf77
--- /dev/null
+++ b/output/summary/summary.go
@@ -0,0 +1,195 @@
+package summary
+
+import (
+	"strings"
+	"sync/atomic"
+	"time"
+
+	"go.k6.io/k6/lib"
+	"go.k6.io/k6/metrics"
+	"go.k6.io/k6/output"
+
+	"github.com/sirupsen/logrus"
+)
+
+const flushPeriod = 200 * time.Millisecond // TODO: make this configurable
+
+var _ output.Output = &Output{}
+
+// Output collects and aggregates metric samples to be rendered in the end-of-test summary.
+type Output struct {
+	output.SampleBuffer
+
+	periodicFlusher *output.PeriodicFlusher
+	logger          logrus.FieldLogger
+
+	dataModel   dataModel
+	summaryMode lib.SummaryMode
+}
+
+// New returns a new summary output.
+func New(params output.Params) (*Output, error) { + sm, err := lib.ValidateSummaryMode(params.RuntimeOptions.SummaryMode.String) + if err != nil { + return nil, err + } + + return &Output{ + logger: params.Logger.WithFields(logrus.Fields{ + "output": "summary", + }), + dataModel: newDataModel(), + summaryMode: sm, + }, nil +} + +// OutputName is the name of the output. +const OutputName = "summary" + +// Description returns a human-readable description of the output. +func (o *Output) Description() string { + return OutputName +} + +// Start starts a new output.PeriodicFlusher to collect and flush metrics that will be +// rendered in the end-of-test summary. +func (o *Output) Start() error { + pf, err := output.NewPeriodicFlusher(flushPeriod, o.flushMetrics) + if err != nil { + return err + } + o.logger.Debug("Started!") + o.periodicFlusher = pf + return nil +} + +// Stop flushes any remaining metrics and stops the goroutine. +func (o *Output) Stop() error { + o.periodicFlusher.Stop() + return nil +} + +func (o *Output) flushMetrics() { + samples := o.GetBufferedSamples() + for _, sc := range samples { + samples := sc.GetSamples() + for _, sample := range samples { + o.flushSample(sample) + } + } +} + +func (o *Output) flushSample(sample metrics.Sample) { + // First, the sample data is stored into the metrics stored at the k6 metrics registry level. + o.storeSample(sample) + + skipGroupSamples := o.summaryMode == lib.SummaryModeCompact || o.summaryMode == lib.SummaryModeLegacy + if skipGroupSamples { + return + } + + // Then, if the extended mode is enabled, the sample data is stored into each group metrics. + // However, we need to determine whether the groups tree is within a scenario or not. 
+	groupData := o.dataModel.aggregatedGroupData
+	if scenarioName, hasScenario := sample.Tags.Get("scenario"); hasScenario && scenarioName != "default" {
+		groupData = o.dataModel.groupDataFor(scenarioName)
+		groupData.addSample(sample)
+	}
+
+	if groupTag, exists := sample.Tags.Get("group"); exists && len(groupTag) > 0 {
+		normalizedGroupName := strings.TrimPrefix(groupTag, lib.GroupSeparator)
+		groupNames := strings.Split(normalizedGroupName, lib.GroupSeparator)
+
+		// We traverse over all the groups to create a nested structure,
+		// but we only add the sample to the group the sample belongs to,
+		// because, by definition, every group is independent.
+		for _, groupName := range groupNames {
+			groupData.groupDataFor(groupName)
+			groupData = groupData.groupsData[groupName]
+		}
+		groupData.addSample(sample)
+	}
+}
+
+// Summary returns a lib.Summary of the test run.
+func (o *Output) Summary(
+	executionState *lib.ExecutionState,
+	observedMetrics map[string]*metrics.Metric,
+	options lib.Options,
+) *lib.Summary {
+	testRunDuration := executionState.GetCurrentTestRunDuration()
+
+	summary := lib.NewSummary()
+	summary.TestRunDuration = testRunDuration
+
+	summaryTrendStats := options.SummaryTrendStats
+
+	// Process the observed metrics. This is necessary to ensure that we have collected
+	// all metrics, even those that have no samples, so that we can render them in the summary.
+	o.processObservedMetrics(observedMetrics)
+
+	// Populate the thresholds.
+	summary.SummaryThresholds = summaryThresholds(o.dataModel.thresholds, testRunDuration, summaryTrendStats)
+
+	// Populate root group and nested groups recursively.
+	populateSummaryGroup(
+		&summary.SummaryGroup,
+		o.dataModel.aggregatedGroupData,
+		testRunDuration,
+		summaryTrendStats,
+	)
+
+	// Populate scenario groups and nested groups recursively.
+ for scenarioName, scenarioData := range o.dataModel.scenarios { + scenarioSummaryGroup := lib.NewSummaryGroup() + populateSummaryGroup( + &scenarioSummaryGroup, + scenarioData, + testRunDuration, + summaryTrendStats, + ) + summary.Scenarios[scenarioName] = scenarioSummaryGroup + } + + return summary +} + +// storeSample relays the sample to the k6 metrics registry relevant metric. +// +// If it's a check-specific metric, it will also update the check's pass/fail counters. +func (o *Output) storeSample(sample metrics.Sample) { + // If it's the first time we see this metric, we relay the metric from the sample + // and, we store the thresholds for that particular metric, and its sub-metrics. + if _, exists := o.dataModel.aggregatedMetrics[sample.Metric.Name]; !exists { + o.dataModel.aggregatedMetrics.relayMetricFrom(sample) + + o.dataModel.storeThresholdsFor(sample.Metric) + for _, sub := range sample.Metric.Submetrics { + o.dataModel.storeThresholdsFor(sub.Metric) + } + } + + checkName, hasCheckTag := sample.Tags.Get(metrics.TagCheck.String()) + if hasCheckTag && sample.Metric.Name == metrics.ChecksName { + check := o.dataModel.checks.checkFor(checkName) + if sample.Value == 0 { + atomic.AddInt64(&check.Fails, 1) + } else { + atomic.AddInt64(&check.Passes, 1) + } + } +} + +// processObservedMetrics is responsible for ensuring that we have collected +// all metrics, even those that have no samples, so that we can render them in the summary. +func (o *Output) processObservedMetrics(observedMetrics map[string]*metrics.Metric) { + for _, m := range observedMetrics { + if _, exists := o.dataModel.aggregatedMetrics[m.Name]; !exists { + o.dataModel.aggregatedMetrics[m.Name] = aggregatedMetric{ + Metric: m, + Sink: m.Sink, + } + o.dataModel.storeThresholdsFor(m) + } + } +}