From 80ad061a6b06b317f70dbfdec9b6a0cc389fa8c1 Mon Sep 17 00:00:00 2001
From: Jeff Lambert
Date: Wed, 11 Nov 2020 20:34:02 -0500
Subject: [PATCH] Add flag to set max-bg-workers argument

Users may need to change the number of background workers that are used
and therefore should be able to override the default (8) that
timescaledb-tune suggests. This PR adds a --max-bg-workers flag and
adjusts the relevant parameters accordingly.
---
 .gitignore                   |   2 +
 README.md                    |   5 ++
 cmd/timescaledb-tune/main.go |   2 +
 pkg/pgtune/memory_test.go    |   7 +-
 pkg/pgtune/misc.go           |   3 +
 pkg/pgtune/misc_test.go      |   2 +-
 pkg/pgtune/parallel.go       |  27 ++++---
 pkg/pgtune/parallel_test.go  | 142 +++++++++++++++++++++-----------
 pkg/pgtune/tune.go           |  14 +++-
 pkg/pgtune/tune_test.go      |  22 ++++-
 pkg/tstune/tuner.go          |  35 ++++----
 pkg/tstune/tuner_test.go     | 151 ++++++++++++++++++++---------
 12 files changed, 263 insertions(+), 149 deletions(-)

diff --git a/.gitignore b/.gitignore
index a550c0a..2803bbf 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,12 +6,14 @@
 *.dll
 *.so
 *.dylib
+timescaledb-tune
 
 # Test binary, built with `go test -c`
 *.test
 
 # Output of the go coverage tool, specifically when used with LiteIDE
 *.out
+coverage.txt
 
 # Popular IDEs
 .idea/
diff --git a/README.md b/README.md
index eb8cb19..5150cfd 100644
--- a/README.md
+++ b/README.md
@@ -76,6 +76,11 @@ If you want recommendations for a specific amount of memory and/or CPUs:
 $ timescaledb-tune --memory="4GB" --cpus=2
 ```
 
+If you want to set a specific number of background workers (`timescaledb.max_background_workers`):
+```bash
+$ timescaledb-tune --max-bg-workers=16
+```
+
 If you have a dedicated disk for WAL, or want to specify how much of a
 shared disk should be used for WAL:
 ```bash
diff --git a/cmd/timescaledb-tune/main.go b/cmd/timescaledb-tune/main.go
index 231bf80..b5d37a5 100644
--- a/cmd/timescaledb-tune/main.go
+++ b/cmd/timescaledb-tune/main.go
@@ -15,6 +15,7 @@ import (
 	"runtime"
 	"strings"
 
+	"github.com/timescale/timescaledb-tune/pkg/pgtune"
 	"github.com/timescale/timescaledb-tune/pkg/tstune"
 )
 
@@ -35,6 +36,7 @@ func init() {
 	flag.StringVar(&f.PGVersion, "pg-version", "", "Major version of PostgreSQL to base recommendations on. Default is determined via pg_config. Valid values: "+strings.Join(tstune.ValidPGVersions, ", "))
 	flag.StringVar(&f.WALDiskSize, "wal-disk-size", "", "Size of the disk where the WAL resides, in PostgreSQL format , e.g., 4GB. Using this flag helps tune WAL behavior.")
 	flag.Uint64Var(&f.MaxConns, "max-conns", 0, "Max number of connections for the database. Default is equal to our best recommendation")
+	flag.IntVar(&f.MaxBGWorkers, "max-bg-workers", pgtune.MaxBackgroundWorkersDefault, "Max number of background workers")
 	flag.StringVar(&f.ConfPath, "conf-path", "", "Path to postgresql.conf. If blank, heuristics will be used to find it")
 	flag.StringVar(&f.DestPath, "out-path", "", "Path to write the new configuration file. 
If blank, will use the same file that is read from") flag.StringVar(&f.PGConfig, "pg-config", "pg_config", "Path to the pg_config binary") diff --git a/pkg/pgtune/memory_test.go b/pkg/pgtune/memory_test.go index da2ecd3..f0e48bd 100644 --- a/pkg/pgtune/memory_test.go +++ b/pkg/pgtune/memory_test.go @@ -14,20 +14,19 @@ import ( // unaffected by number of CPUs and max connections; the exception is work_mem, // so the adjustment is done in the init function var memoryToBaseVals = map[uint64]map[string]uint64{ - 10 * parse.Gigabyte: map[string]uint64{ + 10 * parse.Gigabyte: { SharedBuffersKey: 2560 * parse.Megabyte, EffectiveCacheKey: 7680 * parse.Megabyte, MaintenanceWorkMemKey: 1280 * parse.Megabyte, WorkMemKey: 64 * parse.Megabyte, }, - 12 * parse.Gigabyte: map[string]uint64{ + 12 * parse.Gigabyte: { SharedBuffersKey: 3 * parse.Gigabyte, EffectiveCacheKey: 9 * parse.Gigabyte, MaintenanceWorkMemKey: 1536 * parse.Megabyte, WorkMemKey: 78643 * parse.Kilobyte, }, - 32 * parse.Gigabyte: map[string]uint64{ - + 32 * parse.Gigabyte: { SharedBuffersKey: 8 * parse.Gigabyte, EffectiveCacheKey: 24 * parse.Gigabyte, MaintenanceWorkMemKey: maintenanceWorkMemLimit, diff --git a/pkg/pgtune/misc.go b/pkg/pgtune/misc.go index c2cdf99..58ac2ae 100644 --- a/pkg/pgtune/misc.go +++ b/pkg/pgtune/misc.go @@ -32,6 +32,9 @@ const ( // MaxConnectionsDefault is the recommended default value for max_connections. const MaxConnectionsDefault uint64 = 100 +// MaxBackgroundWorkersDefault is the recommended default value for timescaledb.max_background_workers. +const MaxBackgroundWorkersDefault int = 8 + // getMaxConns gives a default amount of connections based on a memory step // function. func getMaxConns(totalMemory uint64) uint64 { diff --git a/pkg/pgtune/misc_test.go b/pkg/pgtune/misc_test.go index 6306352..3f60ed6 100644 --- a/pkg/pgtune/misc_test.go +++ b/pkg/pgtune/misc_test.go @@ -153,7 +153,7 @@ func TestMiscRecommenderRecommendPanic(t *testing.T) { func TestMiscSettingsGroup(t *testing.T) { for totalMemory, outerMatrix := range miscSettingsMatrix { for maxConns, matrix := range outerMatrix { - config, err := NewSystemConfig(totalMemory, 8, "10", walDiskUnset, maxConns) + config, err := NewSystemConfig(totalMemory, 8, "10", walDiskUnset, maxConns, MaxBackgroundWorkersDefault) if err != nil { t.Errorf("unexpected error on system config creation: got %v", err) } diff --git a/pkg/pgtune/parallel.go b/pkg/pgtune/parallel.go index 783c1bb..40f6066 100644 --- a/pkg/pgtune/parallel.go +++ b/pkg/pgtune/parallel.go @@ -12,10 +12,10 @@ const ( MaxParallelWorkersGatherKey = "max_parallel_workers_per_gather" MaxParallelWorkers = "max_parallel_workers" // pg10+ - defaultMaxBackgroundWorkers = 8 // This may be more dynamic in the future - minBuiltInProcesses = 3 // at least checkpointer, WALwriter, vacuum + minBuiltInProcesses = 3 // at least checkpointer, WALwriter, vacuum - errOneCPU = "cannot make recommendations with just 1 CPU" + errOneCPU = "cannot make recommendations with just 1 CPU" + errWorkers = "cannot make recommendations with less than %d workers" ) // ParallelLabel is the label used to refer to the parallelism settings group @@ -31,13 +31,14 @@ var ParallelKeys = []string{ // ParallelRecommender gives recommendations for ParallelKeys based on system resources. type ParallelRecommender struct { - cpus int + cpus int + maxBGWorkers int } // NewParallelRecommender returns a ParallelRecommender that recommends based on // the given number of cpus. 
-func NewParallelRecommender(cpus int) *ParallelRecommender { - return &ParallelRecommender{cpus} +func NewParallelRecommender(cpus, maxBGWorkers int) *ParallelRecommender { + return &ParallelRecommender{cpus, maxBGWorkers} } // IsAvailable returns whether this Recommender is usable given the system @@ -53,17 +54,20 @@ func (r *ParallelRecommender) Recommend(key string) string { if r.cpus <= 1 { panic(errOneCPU) } + if r.maxBGWorkers < MaxBackgroundWorkersDefault { + panic(fmt.Sprintf(errWorkers, MaxBackgroundWorkersDefault)) + } if key == MaxWorkerProcessesKey { // Need enough processes to handle built-ins (e.g., autovacuum), // TimescaleDB background workers, and the number of parallel workers // (equal to the number of CPUs). - val = fmt.Sprintf("%d", minBuiltInProcesses+defaultMaxBackgroundWorkers+r.cpus) + val = fmt.Sprintf("%d", minBuiltInProcesses+r.maxBGWorkers+r.cpus) } else if key == MaxParallelWorkers { val = fmt.Sprintf("%d", r.cpus) } else if key == MaxParallelWorkersGatherKey { val = fmt.Sprintf("%d", int(math.Round(float64(r.cpus)/2.0))) } else if key == MaxBackgroundWorkers { - val = fmt.Sprintf("%d", defaultMaxBackgroundWorkers) + val = fmt.Sprintf("%d", r.maxBGWorkers) } else { panic(fmt.Sprintf("unknown key: %s", key)) } @@ -72,8 +76,9 @@ func (r *ParallelRecommender) Recommend(key string) string { // ParallelSettingsGroup is the SettingsGroup to represent parallelism settings. type ParallelSettingsGroup struct { - pgVersion string - cpus int + pgVersion string + cpus int + maxBGWorkers int } // Label should always return the value ParallelLabel. @@ -89,5 +94,5 @@ func (sg *ParallelSettingsGroup) Keys() []string { // GetRecommender should return a new ParallelRecommender. func (sg *ParallelSettingsGroup) GetRecommender() Recommender { - return NewParallelRecommender(sg.cpus) + return NewParallelRecommender(sg.cpus, sg.maxBGWorkers) } diff --git a/pkg/pgtune/parallel_test.go b/pkg/pgtune/parallel_test.go index f608c23..f400572 100644 --- a/pkg/pgtune/parallel_test.go +++ b/pkg/pgtune/parallel_test.go @@ -4,71 +4,105 @@ import ( "fmt" "math/rand" "testing" + "time" "github.com/timescale/timescaledb-tune/pkg/pgutils" ) // parallelSettingsMatrix stores the test cases for ParallelRecommender along // with the expected values for its keys -var parallelSettingsMatrix = map[int]map[string]string{ - 2: map[string]string{ - MaxBackgroundWorkers: fmt.Sprintf("%d", defaultMaxBackgroundWorkers), - MaxWorkerProcessesKey: fmt.Sprintf("%d", 2+minBuiltInProcesses+defaultMaxBackgroundWorkers), - MaxParallelWorkersGatherKey: "1", - MaxParallelWorkers: "2", +var parallelSettingsMatrix = map[int]map[int]map[string]string{ + 2: { + MaxBackgroundWorkersDefault: { + MaxBackgroundWorkers: fmt.Sprintf("%d", MaxBackgroundWorkersDefault), + MaxWorkerProcessesKey: fmt.Sprintf("%d", 2+minBuiltInProcesses+MaxBackgroundWorkersDefault), + MaxParallelWorkersGatherKey: "1", + MaxParallelWorkers: "2", + }, + MaxBackgroundWorkersDefault * 2: { + MaxBackgroundWorkers: fmt.Sprintf("%d", MaxBackgroundWorkersDefault*2), + MaxWorkerProcessesKey: fmt.Sprintf("%d", 2+minBuiltInProcesses+MaxBackgroundWorkersDefault*2), + MaxParallelWorkersGatherKey: "1", + MaxParallelWorkers: "2", + }, }, - 4: map[string]string{ - MaxBackgroundWorkers: fmt.Sprintf("%d", defaultMaxBackgroundWorkers), - MaxWorkerProcessesKey: fmt.Sprintf("%d", 4+minBuiltInProcesses+defaultMaxBackgroundWorkers), - MaxParallelWorkersGatherKey: "2", - MaxParallelWorkers: "4", + 4: { + MaxBackgroundWorkersDefault: { + MaxBackgroundWorkers: 
fmt.Sprintf("%d", MaxBackgroundWorkersDefault), + MaxWorkerProcessesKey: fmt.Sprintf("%d", 4+minBuiltInProcesses+MaxBackgroundWorkersDefault), + MaxParallelWorkersGatherKey: "2", + MaxParallelWorkers: "4", + }, + MaxBackgroundWorkersDefault * 4: { + MaxBackgroundWorkers: fmt.Sprintf("%d", MaxBackgroundWorkersDefault*4), + MaxWorkerProcessesKey: fmt.Sprintf("%d", 4+minBuiltInProcesses+MaxBackgroundWorkersDefault*4), + MaxParallelWorkersGatherKey: "2", + MaxParallelWorkers: "4", + }, }, - 5: map[string]string{ - MaxBackgroundWorkers: fmt.Sprintf("%d", defaultMaxBackgroundWorkers), - MaxWorkerProcessesKey: fmt.Sprintf("%d", 5+minBuiltInProcesses+defaultMaxBackgroundWorkers), - MaxParallelWorkersGatherKey: "3", - MaxParallelWorkers: "5", + 5: { + MaxBackgroundWorkersDefault: { + MaxBackgroundWorkers: fmt.Sprintf("%d", MaxBackgroundWorkersDefault), + MaxWorkerProcessesKey: fmt.Sprintf("%d", 5+minBuiltInProcesses+MaxBackgroundWorkersDefault), + MaxParallelWorkersGatherKey: "3", + MaxParallelWorkers: "5", + }, + MaxBackgroundWorkersDefault * 5: { + MaxBackgroundWorkers: fmt.Sprintf("%d", MaxBackgroundWorkersDefault*5), + MaxWorkerProcessesKey: fmt.Sprintf("%d", 5+minBuiltInProcesses+MaxBackgroundWorkersDefault*5), + MaxParallelWorkersGatherKey: "3", + MaxParallelWorkers: "5", + }, }, } func TestNewParallelRecommender(t *testing.T) { + rand.Seed(time.Now().UnixNano()) for i := 0; i < 1000000; i++ { cpus := rand.Intn(128) - r := NewParallelRecommender(cpus) + // ensure a minimum of background workers + workers := rand.Intn(128-MaxBackgroundWorkersDefault+1) + MaxBackgroundWorkersDefault + r := NewParallelRecommender(cpus, workers) if r == nil { t.Errorf("unexpected nil recommender") } if got := r.cpus; got != cpus { t.Errorf("recommender has incorrect cpus: got %d want %d", got, cpus) } + if got := r.maxBGWorkers; got != workers { + t.Errorf("recommender has incorrect workers: got %d want %d", got, workers) + } } } func TestParallelRecommenderIsAvailable(t *testing.T) { - if r := NewParallelRecommender(0); r.IsAvailable() { + if r := NewParallelRecommender(0, MaxBackgroundWorkersDefault); r.IsAvailable() { t.Errorf("unexpectedly available for 0 cpus") } - if r := NewParallelRecommender(1); r.IsAvailable() { + if r := NewParallelRecommender(1, MaxBackgroundWorkersDefault); r.IsAvailable() { t.Errorf("unexpectedly available for 1 cpus") } for i := 2; i < 1000; i++ { - if r := NewParallelRecommender(i); !r.IsAvailable() { + if r := NewParallelRecommender(i, MaxBackgroundWorkersDefault); !r.IsAvailable() { t.Errorf("unexpected UNavailable for %d cpus", i) } } } func TestParallelRecommenderRecommend(t *testing.T) { - for cpus, matrix := range parallelSettingsMatrix { - r := &ParallelRecommender{cpus} - testRecommender(t, r, ParallelKeys, matrix) + for cpus, tempMatrix := range parallelSettingsMatrix { + for workers, matrix := range tempMatrix { + r := &ParallelRecommender{cpus, workers} + testRecommender(t, r, ParallelKeys, matrix) + } } } func TestParallelRecommenderRecommendPanics(t *testing.T) { + // test invalid key panic func() { - r := &ParallelRecommender{5} + r := &ParallelRecommender{5, MaxBackgroundWorkersDefault} defer func() { if re := recover(); re == nil { t.Errorf("did not panic when should") @@ -77,44 +111,58 @@ func TestParallelRecommenderRecommendPanics(t *testing.T) { r.Recommend("foo") }() + // test invalid CPU panic func() { - r := &ParallelRecommender{1} defer func() { if re := recover(); re == nil { t.Errorf("did not panic when should") } }() + r := &ParallelRecommender{1, 
MaxBackgroundWorkersDefault} + r.Recommend("foo") + }() + + // test invalid worker panic + func() { + defer func() { + if re := recover(); re == nil { + t.Errorf("did not panic when should") + } + }() + r := &ParallelRecommender{5, MaxBackgroundWorkersDefault - 1} r.Recommend("foo") }() } func TestParallelSettingsGroup(t *testing.T) { keyCount := len(ParallelKeys) - for cpus, matrix := range parallelSettingsMatrix { - config := getDefaultTestSystemConfig(t) - config.CPUs = cpus - config.PGMajorVersion = pgutils.MajorVersion96 // 9.6 lacks one key - sg := GetSettingsGroup(ParallelLabel, config) - if got := len(sg.Keys()); got != keyCount-1 { - t.Errorf("incorrect number of keys for PG %s: got %d want %d", pgutils.MajorVersion96, got, keyCount-1) - } - testSettingGroup(t, sg, matrix, ParallelLabel, ParallelKeys) + for cpus, tempMatrix := range parallelSettingsMatrix { + for workers, matrix := range tempMatrix { + config := getDefaultTestSystemConfig(t) + config.CPUs = cpus + config.PGMajorVersion = pgutils.MajorVersion96 // 9.6 lacks one key + config.MaxBGWorkers = workers + sg := GetSettingsGroup(ParallelLabel, config) + if got := len(sg.Keys()); got != keyCount-1 { + t.Errorf("incorrect number of keys for PG %s: got %d want %d", pgutils.MajorVersion96, got, keyCount-1) + } + testSettingGroup(t, sg, matrix, ParallelLabel, ParallelKeys) - // PG10 adds a key - config.PGMajorVersion = pgutils.MajorVersion10 - sg = GetSettingsGroup(ParallelLabel, config) - if got := len(sg.Keys()); got != keyCount { - t.Errorf("incorrect number of keys for PG %s: got %d want %d", pgutils.MajorVersion10, got, keyCount) - } - testSettingGroup(t, sg, matrix, ParallelLabel, ParallelKeys) + // PG10 adds a key + config.PGMajorVersion = pgutils.MajorVersion10 + sg = GetSettingsGroup(ParallelLabel, config) + if got := len(sg.Keys()); got != keyCount { + t.Errorf("incorrect number of keys for PG %s: got %d want %d", pgutils.MajorVersion10, got, keyCount) + } + testSettingGroup(t, sg, matrix, ParallelLabel, ParallelKeys) - config.PGMajorVersion = pgutils.MajorVersion11 - sg = GetSettingsGroup(ParallelLabel, config) - if got := len(sg.Keys()); got != keyCount { - t.Errorf("incorrect number of keys for PG %s: got %d want %d", pgutils.MajorVersion11, got, keyCount) + config.PGMajorVersion = pgutils.MajorVersion11 + sg = GetSettingsGroup(ParallelLabel, config) + if got := len(sg.Keys()); got != keyCount { + t.Errorf("incorrect number of keys for PG %s: got %d want %d", pgutils.MajorVersion11, got, keyCount) + } + testSettingGroup(t, sg, matrix, ParallelLabel, ParallelKeys) } - testSettingGroup(t, sg, matrix, ParallelLabel, ParallelKeys) - } } diff --git a/pkg/pgtune/tune.go b/pkg/pgtune/tune.go index cd4a0e8..d9a1ae1 100644 --- a/pkg/pgtune/tune.go +++ b/pkg/pgtune/tune.go @@ -6,8 +6,9 @@ package pgtune import "fmt" const ( - osWindows = "windows" - errMaxConnsTooLowFmt = "maxConns must be 0 OR >= %d: got %d" + osWindows = "windows" + errMaxConnsTooLowFmt = "maxConns must be 0 OR >= %d: got %d" + errMaxBGWorkersTooLowFmt = "maxBGWorkers must be >= %d: got %d" ) // Recommender is an interface that gives setting recommendations for a given @@ -39,19 +40,24 @@ type SystemConfig struct { PGMajorVersion string WALDiskSize uint64 maxConns uint64 + MaxBGWorkers int } // NewSystemConfig returns a new SystemConfig with the given parameters. 
-func NewSystemConfig(totalMemory uint64, cpus int, pgVersion string, walDiskSize uint64, maxConns uint64) (*SystemConfig, error) { +func NewSystemConfig(totalMemory uint64, cpus int, pgVersion string, walDiskSize uint64, maxConns uint64, maxBGWorkers int) (*SystemConfig, error) { if maxConns != 0 && maxConns < minMaxConns { return nil, fmt.Errorf(errMaxConnsTooLowFmt, minMaxConns, maxConns) } + if maxBGWorkers < MaxBackgroundWorkersDefault { + return nil, fmt.Errorf(errMaxBGWorkersTooLowFmt, MaxBackgroundWorkersDefault, maxBGWorkers) + } return &SystemConfig{ Memory: totalMemory, CPUs: cpus, PGMajorVersion: pgVersion, WALDiskSize: walDiskSize, maxConns: maxConns, + MaxBGWorkers: maxBGWorkers, }, nil } @@ -62,7 +68,7 @@ func GetSettingsGroup(label string, config *SystemConfig) SettingsGroup { case label == MemoryLabel: return &MemorySettingsGroup{config.Memory, config.CPUs, config.maxConns} case label == ParallelLabel: - return &ParallelSettingsGroup{config.PGMajorVersion, config.CPUs} + return &ParallelSettingsGroup{config.PGMajorVersion, config.CPUs, config.MaxBGWorkers} case label == WALLabel: return &WALSettingsGroup{config.Memory, config.WALDiskSize} case label == MiscLabel: diff --git a/pkg/pgtune/tune_test.go b/pkg/pgtune/tune_test.go index 0ea9c1b..1b649e3 100644 --- a/pkg/pgtune/tune_test.go +++ b/pkg/pgtune/tune_test.go @@ -13,7 +13,7 @@ const ( ) func getDefaultTestSystemConfig(t *testing.T) *SystemConfig { - config, err := NewSystemConfig(1024, 4, "10", walDiskUnset, testMaxConns) + config, err := NewSystemConfig(1024, 4, "10", walDiskUnset, testMaxConns, MaxBackgroundWorkersDefault) if err != nil { t.Errorf("unexpected error: got %v", err) } @@ -29,7 +29,7 @@ func TestNewSystemConfig(t *testing.T) { pgVersion = "9.6" } - config, err := NewSystemConfig(mem, cpus, pgVersion, walDiskUnset, testMaxConns) + config, err := NewSystemConfig(mem, cpus, pgVersion, walDiskUnset, testMaxConns, MaxBackgroundWorkersDefault) if err != nil { t.Errorf("unexpected error: got %v", err) } @@ -45,8 +45,12 @@ func TestNewSystemConfig(t *testing.T) { if config.maxConns != testMaxConns { t.Errorf("incorrect max conns: got %d want %d", config.maxConns, testMaxConns) } + if config.MaxBGWorkers != MaxBackgroundWorkersDefault { + t.Errorf("incorrect max background workers: got %d want %d", config.MaxBGWorkers, MaxBackgroundWorkersDefault) + } - _, err = NewSystemConfig(mem, cpus, pgVersion, walDiskUnset, testMaxConnsBad) + // test invalid number of connections + _, err = NewSystemConfig(mem, cpus, pgVersion, walDiskUnset, testMaxConnsBad, MaxBackgroundWorkersDefault) wantErr := fmt.Sprintf(errMaxConnsTooLowFmt, minMaxConns, testMaxConnsBad) if err == nil { t.Errorf("unexpected lack of error") @@ -54,7 +58,8 @@ func TestNewSystemConfig(t *testing.T) { t.Errorf("unexpected error: got\n%s\nwant\n%s", got, wantErr) } - config, err = NewSystemConfig(mem, cpus, pgVersion, walDiskUnset, testMaxConnsSpecial) + // test 0 connections + config, err = NewSystemConfig(mem, cpus, pgVersion, walDiskUnset, testMaxConnsSpecial, MaxBackgroundWorkersDefault) if err != nil { t.Errorf("unexpected error: got %v", err) } @@ -62,6 +67,15 @@ func TestNewSystemConfig(t *testing.T) { t.Errorf("incorrect max conns: got %d want %d", config.maxConns, testMaxConnsSpecial) } + // test invalid number of background workers + _, err = NewSystemConfig(mem, cpus, pgVersion, walDiskUnset, testMaxConns, MaxBackgroundWorkersDefault-1) + wantErr = fmt.Sprintf(errMaxBGWorkersTooLowFmt, MaxBackgroundWorkersDefault, MaxBackgroundWorkersDefault-1) 
+		if err == nil {
+			t.Errorf("unexpected lack of error")
+		} else if got := err.Error(); got != wantErr {
+			t.Errorf("unexpected error: got\n%s\nwant\n%s", got, wantErr)
+		}
+
 	}
 }
diff --git a/pkg/tstune/tuner.go b/pkg/tstune/tuner.go
index 9675e63..a36fb6e 100644
--- a/pkg/tstune/tuner.go
+++ b/pkg/tstune/tuner.go
@@ -71,19 +71,20 @@ var filepathAbsFn = filepath.Abs
 
 // TunerFlags are the flags that control how a Tuner object behaves when it is run.
 type TunerFlags struct {
-	Memory      string // amount of memory to base recommendations on
-	NumCPUs     uint   // number of CPUs to base recommendations on
-	WALDiskSize string // disk size of WAL to base recommendations on
-	PGVersion   string // major version of PostgreSQL to base recommendations on
-	PGConfig    string // path to pg_config binary
-	MaxConns    uint64 // max number of database connections
-	ConfPath    string // path to the postgresql.conf file
-	DestPath    string // path to output file
-	YesAlways   bool   // always respond yes to prompts
-	Quiet       bool   // show only the bare necessities
-	UseColor    bool   // use color in output
-	DryRun      bool   // whether to actual persist changes to disk
-	Restore     bool   // whether to restore a backup
+	Memory       string // amount of memory to base recommendations on
+	NumCPUs      uint   // number of CPUs to base recommendations on
+	WALDiskSize  string // disk size of WAL to base recommendations on
+	PGVersion    string // major version of PostgreSQL to base recommendations on
+	PGConfig     string // path to pg_config binary
+	MaxConns     uint64 // max number of database connections
+	MaxBGWorkers int    // max number of background workers
+	ConfPath     string // path to the postgresql.conf file
+	DestPath     string // path to output file
+	YesAlways    bool   // always respond yes to prompts
+	Quiet        bool   // show only the bare necessities
+	UseColor     bool   // use color in output
+	DryRun       bool   // whether to actually persist changes to disk
+	Restore      bool   // whether to restore a backup
 }
 
 // Tuner represents the tuning program for TimescaleDB. 
@@ -160,7 +161,13 @@ func (t *Tuner) initializeSystemConfig() (*pgtune.SystemConfig, error) { cpus = runtime.NumCPU() } - return pgtune.NewSystemConfig(totalMemory, cpus, pgVersion, walDisk, t.flags.MaxConns) + // Use default BG Workers if not provided + maxBGWorkers := int(t.flags.MaxBGWorkers) + if t.flags.MaxBGWorkers == 0 { + maxBGWorkers = pgtune.MaxBackgroundWorkersDefault + } + + return pgtune.NewSystemConfig(totalMemory, cpus, pgVersion, walDisk, t.flags.MaxConns, maxBGWorkers) } func (t *Tuner) restore(r restorer, filePath string) error { diff --git a/pkg/tstune/tuner_test.go b/pkg/tstune/tuner_test.go index 33c722b..82c4b90 100644 --- a/pkg/tstune/tuner_test.go +++ b/pkg/tstune/tuner_test.go @@ -82,17 +82,19 @@ func TestTunerInitializeSystemConfig(t *testing.T) { okPGConfig := "pg_config" okPGVersion := pgutils.MajorVersion11 cases := []struct { - desc string - flagPGConfig string - flagMemory string - flagNumCPUs uint - flagPGVersion string - flagWALDisk string - wantMemory uint64 - wantCPUs int - wantPGVersion string - wantWALDisk uint64 - errMsg string + desc string + flagPGConfig string + flagMemory string + flagNumCPUs uint + flagMaxBGWorkers int + flagPGVersion string + flagWALDisk string + wantMemory uint64 + wantCPUs int + wantMaxBGWorkers int + wantPGVersion string + wantWALDisk uint64 + errMsg string }{ { desc: "bad pgconfig flag", @@ -117,53 +119,69 @@ func TestTunerInitializeSystemConfig(t *testing.T) { errMsg: "incorrect PostgreSQL bytes format: '400 gigs'", }, { - desc: "use mem flag only", - flagPGConfig: okPGConfig, - flagMemory: "1" + parse.GB, - wantMemory: 1 * parse.Gigabyte, - wantCPUs: runtime.NumCPU(), - wantPGVersion: okPGVersion, - }, - { - desc: "use cpu flag only", - flagPGConfig: okPGConfig, - flagNumCPUs: 2, - wantMemory: totalMemory, - wantCPUs: 2, - wantPGVersion: okPGVersion, - }, - { - desc: "use pg-version flag only", - flagPGVersion: pgutils.MajorVersion10, - wantMemory: totalMemory, - wantCPUs: runtime.NumCPU(), - wantPGVersion: pgutils.MajorVersion10, - }, - { - desc: "use wal-disk flag only", - flagPGConfig: okPGConfig, - flagWALDisk: "4GB", - wantMemory: totalMemory, - wantCPUs: runtime.NumCPU(), - wantPGVersion: okPGVersion, - wantWALDisk: 4 * parse.Gigabyte, - }, - { - desc: "all flags", - flagPGConfig: okPGConfig, - flagMemory: "128" + parse.GB, - flagNumCPUs: 1, - flagPGVersion: pgutils.MajorVersion96, - wantMemory: 128 * parse.Gigabyte, - wantCPUs: 1, - wantPGVersion: pgutils.MajorVersion96, - }, - { - desc: "none flags", - flagPGConfig: okPGConfig, - wantMemory: totalMemory, - wantCPUs: runtime.NumCPU(), - wantPGVersion: okPGVersion, + desc: "use mem flag only", + flagPGConfig: okPGConfig, + flagMemory: "1" + parse.GB, + wantMemory: 1 * parse.Gigabyte, + wantMaxBGWorkers: pgtune.MaxBackgroundWorkersDefault, + wantCPUs: runtime.NumCPU(), + wantPGVersion: okPGVersion, + }, + { + desc: "use cpu flag only", + flagPGConfig: okPGConfig, + flagNumCPUs: 2, + wantMemory: totalMemory, + wantMaxBGWorkers: pgtune.MaxBackgroundWorkersDefault, + wantCPUs: 2, + wantPGVersion: okPGVersion, + }, + { + desc: "use pg-version flag only", + flagPGVersion: pgutils.MajorVersion10, + wantMemory: totalMemory, + wantMaxBGWorkers: pgtune.MaxBackgroundWorkersDefault, + wantCPUs: runtime.NumCPU(), + wantPGVersion: pgutils.MajorVersion10, + }, + { + desc: "use wal-disk flag only", + flagPGConfig: okPGConfig, + flagWALDisk: "4GB", + wantMemory: totalMemory, + wantMaxBGWorkers: pgtune.MaxBackgroundWorkersDefault, + wantCPUs: runtime.NumCPU(), + wantPGVersion: 
okPGVersion, + wantWALDisk: 4 * parse.Gigabyte, + }, + { + desc: "use max-bg-workers flag only", + flagPGConfig: okPGConfig, + flagMaxBGWorkers: pgtune.MaxBackgroundWorkersDefault * 2, + wantMemory: totalMemory, + wantMaxBGWorkers: pgtune.MaxBackgroundWorkersDefault * 2, + wantCPUs: runtime.NumCPU(), + wantPGVersion: okPGVersion, + }, + { + desc: "all flags", + flagPGConfig: okPGConfig, + flagMaxBGWorkers: pgtune.MaxBackgroundWorkersDefault * 3, + flagMemory: "128" + parse.GB, + flagNumCPUs: 1, + flagPGVersion: pgutils.MajorVersion96, + wantMemory: 128 * parse.Gigabyte, + wantMaxBGWorkers: pgtune.MaxBackgroundWorkersDefault * 3, + wantCPUs: 1, + wantPGVersion: pgutils.MajorVersion96, + }, + { + desc: "none flags", + flagPGConfig: okPGConfig, + wantMemory: totalMemory, + wantMaxBGWorkers: pgtune.MaxBackgroundWorkersDefault, + wantCPUs: runtime.NumCPU(), + wantPGVersion: okPGVersion, }, } @@ -178,11 +196,12 @@ func TestTunerInitializeSystemConfig(t *testing.T) { for _, c := range cases { t.Run(c.desc, func(t *testing.T) { tuner := &Tuner{nil, nil, &TunerFlags{ - PGConfig: c.flagPGConfig, - PGVersion: c.flagPGVersion, - Memory: c.flagMemory, - NumCPUs: c.flagNumCPUs, - WALDiskSize: c.flagWALDisk, + PGConfig: c.flagPGConfig, + PGVersion: c.flagPGVersion, + Memory: c.flagMemory, + NumCPUs: c.flagNumCPUs, + MaxBGWorkers: c.flagMaxBGWorkers, + WALDiskSize: c.flagWALDisk, }} config, err := tuner.initializeSystemConfig() if len(c.errMsg) == 0 { @@ -203,6 +222,9 @@ func TestTunerInitializeSystemConfig(t *testing.T) { if got := config.WALDiskSize; got != c.wantWALDisk { t.Errorf("incorrect WAL disk: got %d want %d", got, c.wantWALDisk) } + if got := config.MaxBGWorkers; got != c.wantMaxBGWorkers { + t.Errorf("incorrect bg workers: got %d want %d", got, c.wantMaxBGWorkers) + } } else { if err == nil { t.Errorf("unexpected lack of error") @@ -1029,6 +1051,7 @@ const ( testMaxConns = 20 testMem uint64 = 8 * parse.Gigabyte testCPUs = 4 + testWorkers = pgtune.MaxBackgroundWorkersDefault testWALDisk uint64 = 0 ) @@ -1041,7 +1064,7 @@ func (sg *testSettingsGroup) Keys() []string { return sg.key func (sg *testSettingsGroup) GetRecommender() pgtune.Recommender { return &badRecommender{} } func getDefaultSystemConfig(t *testing.T) *pgtune.SystemConfig { - config, err := pgtune.NewSystemConfig(testMem, testCPUs, pgutils.MajorVersion10, testWALDisk, testMaxConns) + config, err := pgtune.NewSystemConfig(testMem, testCPUs, pgutils.MajorVersion10, testWALDisk, testMaxConns, testWorkers) if err != nil { t.Fatalf("unexpected error in config creation: got %v", err) }