diff --git a/Makefile b/Makefile
index 091703b3d..b56bee351 100644
--- a/Makefile
+++ b/Makefile
@@ -49,7 +49,6 @@ release: clean dirty_worktree_check dist/linux_amd64/baur dist/darwin_amd64/baur
 	@echo - git push --tags
 	@echo - upload $(ls dist/*/*.tar.xz) files
-

 .PHONY: check
 check:
 	$(info * running static code checks)
diff --git a/command/apps_builds.go b/command/apps_builds.go
index 8ae0c945a..6b3a71440 100644
--- a/command/apps_builds.go
+++ b/command/apps_builds.go
@@ -120,7 +120,7 @@ func resultAddUploadResult(appName string, ar baur.BuildOutput, r *upload.Result
 	b, exist := result[appName]
 	if !exist {
-		log.Fatalf("resultAddUploadResult: %q does not exist in build result map\n", appName)
+		log.Fatalf("resultAddUploadResult: %q does not exist in build result map", appName)
 	}

 	if r.Job.Type() == upload.JobDocker {
@@ -131,12 +131,12 @@
 	artDigest, err := ar.Digest()
 	if err != nil {
-		log.Fatalf("getting digest for output %q failed: %s\n", ar, err)
+		log.Fatalf("getting digest for output %q failed: %s", ar, err)
 	}

 	arSize, err := ar.Size(&outputBackends)
 	if err != nil {
-		log.Fatalf("getting size of output %q failed: %s\n", ar, err)
+		log.Fatalf("getting size of output %q failed: %s", ar, err)
 	}

 	b.Outputs = append(b.Outputs, &storage.Output{
@@ -157,7 +157,7 @@ func recordResultIsComplete(app *baur.App) (bool, *storage.Build) {
 	b, exist := result[app.Name]
 	if !exist {
-		log.Fatalf("recordResultIfComplete: %q does not exist in build result map\n", app.Name)
+		log.Fatalf("recordResultIfComplete: %q does not exist in build result map", app.Name)
 	}

 	if len(app.Outputs) == len(b.Outputs) {
@@ -192,7 +192,7 @@ func calcDigests(app *baur.App) ([]*storage.Input, string) {
 	// The storageInputs can be removed, apps.BuildInputs() can be used
 	// instead to later fill the struct for the db
-	log.Debugf("%s: resolving build inputs and calculating digests...\n", app)
+	log.Debugf("%s: resolving build inputs and calculating digests...", app)
 	buildInputs, err := app.BuildInputs()
 	if err != nil {
 		log.Fatalf("%s: resolving build input paths failed: %s\n", app, err)
@@ -201,7 +201,7 @@
 	for _, s := range buildInputs {
 		d, err := s.Digest()
 		if err != nil {
-			log.Fatalf("%s: calculating build input digest failed: %s\n", app, err)
+			log.Fatalf("%s: calculating build input digest failed: %s", app, err)
 		}

 		storageInputs = append(storageInputs, &storage.Input{
@@ -215,7 +215,7 @@
 	if len(inputDigests) > 0 {
 		td, err := sha384.Sum(inputDigests)
 		if err != nil {
-			log.Fatalln("calculating total input digest failed:", err)
+			log.Fatalf("%s: calculating total input digest failed: %s", app, err)
 		}

 		totalDigest = td.String()
@@ -247,14 +247,14 @@ func createBuildJobs(apps []*baur.App) []*build.Job {
 func startBGUploader(outputCnt int, uploadChan chan *upload.Result) upload.Manager {
 	var dockerUploader *docker.Client

-	s3Uploader, err := s3.NewClient()
+	s3Uploader, err := s3.NewClient(log.StdLogger)
 	if err != nil {
 		log.Fatalln(err.Error())
 	}

 	dockerUser, dockerPass := dockerAuthFromEnv()
 	if len(dockerUser) != 0 {
-		log.Debugf("read docker registry auth data from %q, %q Env variables, authenticating as %q \n",
+		log.Debugf("read docker registry auth data from %q, %q Env variables, authenticating as %q",
 			dockerEnvUsernameVar, dockerEnvPasswordVar, dockerUser)
 		dockerUploader, err = docker.NewClientwAuth(dockerUser, dockerPass)
 	} else {
@@ -264,7 +264,7 @@ func startBGUploader(outputCnt int, uploadChan chan *upload.Result) upload.Manag
 		log.Fatalln(err)
 	}

-	uploader := sequploader.New(s3Uploader, dockerUploader, uploadChan)
+	uploader := sequploader.New(log.StdLogger, s3Uploader, dockerUploader, uploadChan)

 	outputBackends.DockerClt = dockerUploader
@@ -304,7 +304,7 @@ func waitPrintUploadStatus(uploader upload.Manager, uploadChan chan *upload.Resu
 			log.Fatalf("upload of %q failed: %s\n", ud.Output, res.Err)
 		}

-		log.Actionf("%s: %s uploaded to %s (%.3fs)\n",
+		fmt.Printf("%s: %s uploaded to %s (%.3fs)\n",
 			ud.App.Name, ud.Output.LocalPath(), res.URL, res.Duration.Seconds())

 		resultAddUploadResult(ud.App.Name, ud.Output, res)
@@ -315,7 +315,7 @@
 		if err := store.Save(build); err != nil {
 			log.Fatalf("storing build information about %q failed: %s", ud.App.Name, err)
 		}
-		log.Infof("%s: build %d stored in database\n", ud.App.Name, build.ID)
+		fmt.Printf("%s: build %d stored in database\n", ud.App.Name, build.ID)

 		log.Debugf("stored the following build information: %s\n", prettyprint.AsString(build))
 	}
@@ -341,14 +341,14 @@ func outstandingBuilds(storage storage.Storer, apps []*baur.App) []*baur.App {
 			res = append(res, app)
 		}

-		if !log.DebugEnabled {
+		if !verboseFlag {
 			fmt.Printf(".")
 		}

 		log.Debugf("\n%s: build status. %q\n", app, buildStatus)
 	}

-	if !log.DebugEnabled {
+	if !log.DebugEnabled() {
 		fmt.Println()
 	}
@@ -370,15 +370,18 @@ func appBuildRun(cmd *cobra.Command, args []string) {
 	}

 	if !buildForce {
-		log.Actionf("identifying applications with outstanding builds")
+		fmt.Printf("identifying applications with outstanding builds")
+		if verboseFlag {
+			fmt.Println()
+		}
 		apps = outstandingBuilds(store, apps)
 	}

 	if len(apps) == 0 {
 		fmt.Println()
 		term.PrintSep()
-		fmt.Printf("Application build(s) already exist, nothing to build, see 'baur ls -b'.\n" +
-			"If you want to rebuild applications pass '-f' to 'baur build'\n")
+		fmt.Println("Application build(s) already exist, nothing to build, see 'baur ls -b'.\n" +
+			"If you want to rebuild applications pass '-f' to 'baur build'")
 		os.Exit(0)
 	}
@@ -397,10 +400,10 @@ func appBuildRun(cmd *cobra.Command, args []string) {
 		uploadWatchFin = make(chan struct{}, 1)
 		go waitPrintUploadStatus(uploader, uploadChan, uploadWatchFin, outputCnt)

-		log.Actionf("building and uploading the following applications: \n%s\n",
+		fmt.Printf("building and uploading the following applications:\n%s\n",
 			appsToString(apps))
 	} else {
-		log.Actionf("building the following applications: \n%s\n",
+		fmt.Printf("building the following applications:\n%s\n",
 			appsToString(apps))
 	}
@@ -413,28 +416,29 @@ func appBuildRun(cmd *cobra.Command, args []string) {
 		app := bud.App

 		if status.Error != nil {
-			log.Fatalf("%s: build failed: %s\n", app.Name, status.Error)
+			log.Fatalf("%s: build failed: %s", app.Name, status.Error)
 		}

 		if status.ExitCode != 0 {
 			log.Fatalf("%s: build failed: command (%q) exited with code %d "+
-				"Output: %s\n",
+				"Output: %s",
 				app.Name, status.Job.Command, status.ExitCode, status.Output)
 		}

-		log.Actionf("%s: build successful (%.3fs)\n", app.Name, status.StopTs.Sub(status.StartTs).Seconds())
+		fmt.Printf("%s: build successful (%.3fs)\n", app.Name, status.StopTs.Sub(status.StartTs).Seconds())
 		resultAddBuildResult(bud, status)

 		for _, ar := range app.Outputs {
 			if !ar.Exists() {
-				log.Fatalf("build output %q of %s did not exist after build\n",
-					ar, app)
+				log.Fatalf("%s: build output %q did not exist after build",
+					app, ar)
 			}

 			if buildUpload {
 				uj, err := ar.UploadJob()
 				if err != nil {
-					log.Fatalf("could not get upload job for build output %s: %s", ar, err)
+					log.Fatalf("%s: could not get upload job for build output %s: %s",
+						app, ar, err)
 				}

 				uj.SetUserData(&uploadUserData{
@@ -447,22 +451,22 @@ func appBuildRun(cmd *cobra.Command, args []string) {
 			}

 			d, err := ar.Digest()
 			if err != nil {
-				log.Fatalf("%s: could determine input digest of %s: %s\n",
+				log.Fatalf("%s: calculating input digest of %s failed: %s",
 					app.Name, ar, err)
 			}

-			log.Actionf("%s: created %s (%s)\n", app.Name, ar, d)
+			fmt.Printf("%s: created %s (%s)\n", app.Name, ar, d)
 		}
 	}

 	if buildUpload && outputCnt > 0 {
-		log.Actionf("waiting for uploads to finish...\n")
+		fmt.Println("waiting for uploads to finish...")
 		<-uploadWatchFin
 	}

 	term.PrintSep()
-	log.Infof("finished in %s\n", time.Since(startTs))
+	fmt.Printf("finished in %s\n", time.Since(startTs))
 }

 func mustGetBuildStatus(app *baur.App, storage storage.Storer) (baur.BuildStatus, *storage.Build, string) {
@@ -470,7 +474,7 @@
 	status, build, err := baur.GetBuildStatus(storage, app)
 	if err != nil {
-		log.Fatalf("evaluating build status of %s failed: %s\n", app, err)
+		log.Fatalf("%s: evaluating build status failed: %s", app, err)
 	}

 	if build != nil {
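Editor's note, not part of the diff: after this change the logger is handed to the upload backends explicitly instead of being imported as a package global. A minimal sketch of the wiring that startBGUploader now performs, assuming the baur packages shown in this diff (the nil docker uploader and the channel size are placeholders):

    package main

    import (
        "github.com/simplesurance/baur/log"
        "github.com/simplesurance/baur/s3"
        "github.com/simplesurance/baur/upload"
        sequploader "github.com/simplesurance/baur/upload/seq"
    )

    func main() {
        // The S3 client now receives the logger as an argument; the old
        // log.S3Logger adapter (removed at the end of this diff) is gone.
        s3Clt, err := s3.NewClient(log.StdLogger)
        if err != nil {
            log.Fatalln(err)
        }

        // The sequential uploader takes the logger as its first parameter.
        uploadChan := make(chan *upload.Result, 10)
        _ = sequploader.New(log.StdLogger, s3Clt, nil, uploadChan)
    }

Apart from the added logger argument, the constructor calls are the same ones visible in the hunks above.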
diff --git a/command/apps_init.go b/command/apps_init.go
index 57438b567..3b227edc6 100644
--- a/command/apps_init.go
+++ b/command/apps_init.go
@@ -1,6 +1,7 @@
 package command

 import (
+	"fmt"
 	"os"
 	"path"
 	"strings"
@@ -58,6 +59,6 @@ func appsInit(cmd *cobra.Command, args []string) {
 		log.Fatalln(err)
 	}

-	log.Infof("configuration file for %s was written to %s\n",
+	fmt.Printf("configuration file for %s was written to %s\n",
 		appName, baur.AppCfgFile)
 }
diff --git a/command/builds_verify.go b/command/builds_verify.go
index 148421412..909bd0add 100644
--- a/command/builds_verify.go
+++ b/command/builds_verify.go
@@ -62,7 +62,7 @@ func verify(cmd *cobra.Command, args []string) {
 	const dateLayout = "2006.01.02"
 	startTs, err := time.Parse(dateLayout, verifyFromDate)
 	if err != nil {
-		log.Fatalf("parsing start date value failed: %s\n:", err)
+		log.Fatalf("parsing start date value failed: %s", err)
 	}

 	repo := MustFindRepository()
@@ -78,7 +78,7 @@ func verify(cmd *cobra.Command, args []string) {
 		log.Fatalln("retrieving applications from storage failed:", err)
 	}

-	log.Actionf("scanning for builds after %s with same inputs that produced different outputs...\n", startTs.Format(dateLayout))
+	fmt.Printf("scanning for builds after %s with same inputs that produced different outputs...\n", startTs.Format(dateLayout))

 	var issuesFound bool
 	for _, app := range storedApps {
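Editor's note, not part of the diff: the trailing "\n" disappears from the log.Fatalf and log.Debugf format strings throughout these files because the reworked logger (see the log/log.go hunks further down) delegates to the standard library's log.Logger, which appends the missing newline itself, while plain fmt.Printf output keeps its explicit "\n". A small sketch of that difference:

    package main

    import (
        "fmt"
        stdlog "log"
        "os"
    )

    func main() {
        logger := stdlog.New(os.Stderr, "", 0)

        // The standard library logger appends a newline when the message
        // does not already end with one.
        logger.Printf("repository root found: %s", "/home/user/repo")

        // fmt.Printf prints exactly what it is given, so user-facing
        // status output keeps its explicit "\n".
        fmt.Printf("%s: build %d stored in database\n", "example-app", 42)
    }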
diff --git a/command/helpers.go b/command/helpers.go
index b8b4f78d8..e5fb79350 100644
--- a/command/helpers.go
+++ b/command/helpers.go
@@ -28,14 +28,14 @@ func MustFindRepository() *baur.Repository {
 	if err != nil {
 		if os.IsNotExist(err) {
 			log.Fatalf("could not find repository root config file "+
-				"ensure the file '%s' exist in the root\n",
+				"ensure the file '%s' exist in the root",
 				baur.RepositoryCfgFile)
 		}

 		log.Fatalln(err)
 	}

-	log.Debugf("repository root found: %v\n", rep.Path)
+	log.Debugf("repository root found: %s", rep.Path)

 	return rep
 }
@@ -54,7 +54,7 @@ func mustArgToApp(repo *baur.Repository, arg string) *baur.App {
 	if isAppDir(arg) {
 		app, err := repo.AppByDir(arg)
 		if err != nil {
-			log.Fatalf("could not find application in dir '%s': %s\n", arg, err)
+			log.Fatalf("could not find application in dir '%s': %s", arg, err)
 		}

 		return app
@@ -63,7 +63,7 @@ func mustArgToApp(repo *baur.Repository, arg string) *baur.App {
 	app, err := repo.AppByName(arg)
 	if err != nil {
 		if os.IsNotExist(err) {
-			log.Fatalf("could not find application with name '%s'\n", arg)
+			log.Fatalf("could not find application with name '%s'", arg)
 		}
 		log.Fatalln(err)
 	}
@@ -75,7 +75,7 @@ func mustArgToApp(repo *baur.Repository, arg string) *baur.App {
 func MustGetPostgresClt(r *baur.Repository) *postgres.Client {
 	clt, err := postgres.New(r.PSQLURL)
 	if err != nil {
-		log.Fatalf("could not establish connection to postgreSQL db: %s\n", err)
+		log.Fatalf("could not establish connection to postgreSQL db: %s", err)
 	}

 	return clt
@@ -119,7 +119,7 @@ func mustArgToApps(repo *baur.Repository, args []string) []*baur.App {
 	}

 	if len(apps) == 0 {
-		log.Fatalf("could not find any applications\n"+
+		log.Fatalf("could not find any applications"+
 			"- ensure the [Discover] section is correct in %s\n"+
 			"- ensure that you have >1 application dirs "+
 			"containing a %s file\n",
diff --git a/command/inputs_show.go b/command/inputs_show.go
index 8b280e26a..309e01fbe 100644
--- a/command/inputs_show.go
+++ b/command/inputs_show.go
@@ -50,7 +50,7 @@ func inputsShow(cmd *cobra.Command, args []string) {
 	writeHeaders := !inputsShowConfig.quiet && !inputsShowConfig.csv

 	if len(app.BuildInputPaths) == 0 {
-		log.Fatalf("No build inputs have been configured in the %s file of %s\n", baur.AppCfgFile, app.Name)
+		log.Fatalf("No build inputs are configured in %s of %s", baur.AppCfgFile, app.Name)
 	}

 	if writeHeaders {
diff --git a/command/repo_init.go b/command/repo_init.go
index 5a9bc31a2..940f794d9 100644
--- a/command/repo_init.go
+++ b/command/repo_init.go
@@ -1,6 +1,7 @@
 package command

 import (
+	"fmt"
 	"os"
 	"path"

@@ -24,7 +25,7 @@ var initCmd = &cobra.Command{
 func initRepositoryCfg(cmd *cobra.Command, args []string) {
 	rep, err := baur.FindRepository()
 	if err == nil {
-		log.Fatalf("repository configuration %s already exist\n",
+		log.Fatalf("repository configuration %s already exist",
 			path.Join(rep.Path, baur.RepositoryCfgFile))
 	}

@@ -44,6 +45,6 @@ func initRepositoryCfg(cmd *cobra.Command, args []string) {
 		log.Fatalln(err)
 	}

-	log.Infof("written example repository configuration to %s\n",
+	fmt.Printf("written example repository configuration to %s\n",
 		baur.RepositoryCfgFile)
 }
diff --git a/command/root.go b/command/root.go
index 6885754b6..a5b45af04 100644
--- a/command/root.go
+++ b/command/root.go
@@ -8,6 +8,7 @@ import (

 	"github.com/spf13/cobra"

+	"github.com/simplesurance/baur/exec"
 	"github.com/simplesurance/baur/log"
 	"github.com/simplesurance/baur/version"
 )
@@ -25,7 +26,10 @@ var cpuProfilingFlag bool
 var defCPUProfFile = filepath.Join(os.TempDir(), "baur-cpu.prof")

 func initSb(_ *cobra.Command, _ []string) {
-	log.DebugEnabled = verboseFlag
+	if verboseFlag {
+		log.StdLogger.EnableDebug(verboseFlag)
+		exec.SetDebugOutputFn(log.StdLogger.Debugf)
+	}

 	if cpuProfilingFlag {
 		cpuProfFile, err := os.Create(defCPUProfFile)
@@ -50,7 +54,7 @@ func Execute() {
 	}

 	if cpuProfilingFlag {
-		log.Infof("\ncpu profile written to %q\n", defCPUProfFile)
+		fmt.Printf("\ncpu profile written to %q\n", defCPUProfFile)
 		pprof.StopCPUProfile()
 	}
 }
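Editor's illustration, not part of the diff: initSb now enables debug output on the shared StdLogger and hooks the exec package up to it only when the verbose flag is set. Roughly, the startup path behaves like the sketch below (the helper name enableVerbose is made up for illustration):

    package main

    import (
        "github.com/simplesurance/baur/exec"
        "github.com/simplesurance/baur/log"
    )

    // enableVerbose mirrors what initSb does when the verbose flag is set.
    func enableVerbose(verbose bool) {
        if verbose {
            // Debug messages of the default logger become visible ...
            log.StdLogger.EnableDebug(verbose)
            // ... and the exec package routes its debug lines to the same logger.
            exec.SetDebugOutputFn(log.StdLogger.Debugf)
        }
    }

    func main() {
        enableVerbose(true)
        log.Debugf("debug logging enabled: %t", log.DebugEnabled())
    }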
diff --git a/docker/docker.go b/docker/docker.go
index 8edc42265..3fb6d452d 100644
--- a/docker/docker.go
+++ b/docker/docker.go
@@ -12,8 +12,6 @@ import (
 	"docker.io/go-docker"
 	"docker.io/go-docker/api/types"
 	"github.com/pkg/errors"
-
-	"github.com/simplesurance/baur/log"
 )

 // Client is a docker client
@@ -110,10 +108,6 @@ func (c *Client) Upload(ctx context.Context, image, dest string) (string, error
 	r := bufio.NewReader(closer)
 	for {
 		status, err := r.ReadBytes('\n')
-
-		log.Debugf("docker Upload of %s to %s, read server response: %q\n",
-			image, dest, status)
-
 		if err == io.EOF {
 			break
 		}
diff --git a/exec/exec.go b/exec/exec.go
index 683fef2c9..a585a1d11 100644
--- a/exec/exec.go
+++ b/exec/exec.go
@@ -7,15 +7,20 @@ import (
 	"syscall"

 	"github.com/pkg/errors"
-
-	"github.com/simplesurance/baur/log"
 )

+var debugOutputFn = func(string, ...interface{}) { return }
+
+// SetDebugOutputFn configures the package to pass debug output to this function
+func SetDebugOutputFn(fn func(format string, v ...interface{})) {
+	debugOutputFn = fn
+}
+
 // Command runs the passed command in a shell in the passed dir.
 // If the command exits with a code != 0, err is nil
 func Command(dir, command string) (output string, exitCode int, err error) {
 	cmd := exec.Command("sh", "-c", command)
-	log.Debugf("running in %q \"%s %s\"\n", dir, cmd.Path, strings.Join(cmd.Args, " "))
+	debugOutputFn("running in %q \"%s %s\"\n", dir, cmd.Path, strings.Join(cmd.Args, " "))

 	outReader, err := cmd.StdoutPipe()
 	if err != nil {
@@ -34,7 +39,7 @@ func Command(dir, command string) (output string, exitCode int, err error) {
 	in := bufio.NewScanner(outReader)
 	for in.Scan() {
 		o := in.Text()
-		log.Debugln(o)
+		debugOutputFn(o)
 		output += o + "\n"
 	}
diff --git a/gosrcdir.go b/gosrcdir.go
index 545efe506..609464f7d 100644
--- a/gosrcdir.go
+++ b/gosrcdir.go
@@ -9,7 +9,6 @@ import (
 	"github.com/simplesurance/baur/fs"
 	"github.com/simplesurance/baur/golang"
-	"github.com/simplesurance/baur/log"
 )

 // GoSrcDirs resolves Golang source files in directories to files including
@@ -51,8 +50,6 @@ func (g *GoSrcDirs) Resolve() ([]BuildInput, error) {
 		fullpaths = append(fullpaths, absPath)
 	}

-	log.Debugf("resolving go src files, GOPATH=%q, srcdirs=%q\n", g.gopath, fullpaths)
-
 	absSrcPaths, err := golang.SrcFiles(g.gopath, fullpaths...)
 	if err != nil {
 		return nil, err
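Editor's illustration, not part of the diff: exec no longer imports baur's log package; its debug output goes through a package-level function that defaults to a no-op, so callers opt in explicitly. A hedged, self-contained example (the directory and command are placeholders):

    package main

    import (
        "fmt"
        stdlog "log"
        "os"

        "github.com/simplesurance/baur/exec"
    )

    func main() {
        // Without this call the default debugOutputFn silently discards
        // the debug messages that exec.Command produces.
        exec.SetDebugOutputFn(stdlog.New(os.Stderr, "exec: ", 0).Printf)

        // Command runs the given command via "sh -c" in the given directory.
        out, exitCode, err := exec.Command("/tmp", "echo hello")
        if err != nil {
            panic(err)
        }

        fmt.Printf("exit code: %d, output: %s", exitCode, out)
    }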
diff --git a/log/log.go b/log/log.go
index b4d210725..72500ad4e 100644
--- a/log/log.go
+++ b/log/log.go
@@ -2,119 +2,149 @@ package log

 import (
 	"fmt"
+	"log"
 	"os"
-	"sync"
 )

-var lock = sync.Mutex{}
-
-// DebugEnabled set to true to print debug message otherwise they are suppressed
-var DebugEnabled bool
-
-const actionPrefix = "*"
 const errorPrefix = "ERROR"

-// Actionln prints something with the ActionPrefix preprended
-func Actionln(v ...interface{}) {
-	lock.Lock()
-	defer lock.Unlock()
+// Logger logs messages
+type Logger struct {
+	debugEnabled bool
+	logger       *log.Logger
+}

-	if len(v) != 0 {
-		v[0] = fmt.Sprintf("%s %s", actionPrefix, v[0])
-	}
+// StdLogger is the logger that is used from the log functions in this package
+var StdLogger = New(false)

-	fmt.Println(v...)
+// New returns a new Logger that logs to Stderr.
+// Debug messages are only printed if debugEnabled is true
+func New(debugEnabled bool) *Logger {
+	return &Logger{
+		debugEnabled: debugEnabled,
+		logger:       log.New(os.Stderr, "", 0),
+	}
 }

-// Actionf prints something with the ActionPrefix preprended
-func Actionf(format string, v ...interface{}) {
-	lock.Lock()
-	defer lock.Unlock()
+// EnableDebug enables/disables logging debug messages
+func (l *Logger) EnableDebug(enabled bool) {
+	l.debugEnabled = enabled
+}

-	fmt.Printf(actionPrefix+" "+format, v...)
+// DebugEnabled returns true if logging debug messages is enabled
+func (l *Logger) DebugEnabled() bool {
+	return l.debugEnabled
 }

 // Debugln logs a debug message to stdout.
 // It's only shown if debugging is enabled.
-func Debugln(v ...interface{}) {
-	lock.Lock()
-	defer lock.Unlock()
-
-	if !DebugEnabled {
+func (l *Logger) Debugln(v ...interface{}) {
+	if !l.debugEnabled {
 		return
 	}

-	fmt.Println(v...)
+	l.logger.Println(v...)
 }

 // Debugf logs a debug message to stdout.
 // It's only shown if debugging is enabled.
-func Debugf(format string, v ...interface{}) {
-	lock.Lock()
-	defer lock.Unlock()
-
-	if !DebugEnabled {
+func (l *Logger) Debugf(format string, v ...interface{}) {
+	if !l.debugEnabled {
 		return
 	}

-	fmt.Printf(format, v...)
+	l.logger.Printf(format, v...)
 }

 // Fatalln logs a message to stderr and terminates the application with an error
-func Fatalln(v ...interface{}) {
-	lock.Lock()
-	defer lock.Unlock()
-
+func (l *Logger) Fatalln(v ...interface{}) {
 	if len(v) != 0 {
 		v[0] = fmt.Sprintf("%s: %s", errorPrefix, v[0])
 	}

-	fmt.Fprintln(os.Stderr, v...)
-
-	os.Exit(1)
+	l.logger.Fatalln(v...)
 }

 // Fatalf logs a message to stderr and terminates the application with an error
-func Fatalf(format string, v ...interface{}) {
-	lock.Lock()
-	defer lock.Unlock()
-
-	fmt.Fprintf(os.Stderr, errorPrefix+": "+format, v...)
-	os.Exit(1)
+func (l *Logger) Fatalf(format string, v ...interface{}) {
+	l.logger.Fatalf(errorPrefix+": "+format, v...)
 }

 // Errorln logs a message to stderr
-func Errorln(v ...interface{}) {
-	lock.Lock()
-	defer lock.Unlock()
-
+func (l *Logger) Errorln(v ...interface{}) {
 	if len(v) != 0 {
 		v[0] = fmt.Sprintf("%s: %s", errorPrefix, v[0])
 	}

-	fmt.Fprintln(os.Stderr, v...)
+	l.logger.Println(v...)
 }

 // Errorf logs a message to stderr
-func Errorf(format string, v ...interface{}) {
-	lock.Lock()
-	defer lock.Unlock()
+func (l *Logger) Errorf(format string, v ...interface{}) {
+	l.logger.Printf(errorPrefix+": "+format, v...)
+}
+
+// Infoln logs a message to stdout
+/*
+func (l *Logger) Infoln(v ...interface{}) {
+	l.logger.Println(v...)
+}
+*/
+
+// Infof logs a message to stdout
+/*
+func (l *Logger) Infof(format string, v ...interface{}) {
+	l.logger.Printf(format, v...)
+}
+*/
+
+// DebugEnabled returns true if the Stdlogger logs debug messages
+func DebugEnabled() bool {
+	return StdLogger.DebugEnabled()
+}
+
+// Debugln logs a debug message to stdout.
+// It's only shown if debugging is enabled.
+func Debugln(v ...interface{}) {
+	StdLogger.Debugln(v...)
+}
+
+// Debugf logs a debug message to stdout.
+// It's only shown if debugging is enabled.
+func Debugf(format string, v ...interface{}) {
+	StdLogger.Debugf(format, v...)
+}

-	fmt.Fprintf(os.Stderr, errorPrefix+": "+format, v...)
+// Fatalln logs a message to stderr and terminates the application with an error
+func Fatalln(v ...interface{}) {
+	StdLogger.Fatalln(v...)
+}
+
+// Fatalf logs a message to stderr and terminates the application with an error
+func Fatalf(format string, v ...interface{}) {
+	StdLogger.Fatalf(format, v...)
+}
+
+// Errorln logs a message to stderr
+func Errorln(v ...interface{}) {
+	StdLogger.Errorln(v...)
+}
+
+// Errorf logs a message to stderr
+func Errorf(format string, v ...interface{}) {
+	StdLogger.Errorf(format, v...)
 }

 // Infoln logs a message to stdout
+/*
 func Infoln(v ...interface{}) {
-	lock.Lock()
-	defer lock.Unlock()
-
-	fmt.Println(v...)
+	StdLogger.Infoln(v...)
 }
+*/

 // Infof logs a message to stdout
+/*
 func Infof(format string, v ...interface{}) {
-	lock.Lock()
-	defer lock.Unlock()
-
-	fmt.Printf(format, v...)
+	StdLogger.Infof(format, v...)
 }
+*/
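Editor's illustration, not part of the diff: the log package now exposes a Logger type plus package-level wrappers around StdLogger, so existing call sites keep working while packages such as s3 and upload/seq can receive a logger instance. A short usage sketch:

    package main

    import "github.com/simplesurance/baur/log"

    func main() {
        // Package-level functions delegate to log.StdLogger.
        log.Debugf("not shown, debug is disabled by default")
        log.Errorf("something went wrong: %s", "example error")

        // A separate instance can be created with debug output enabled.
        logger := log.New(true)
        logger.Debugln("visible, this logger has debug enabled")

        // StdLogger itself can be reconfigured at runtime, as root.go does.
        log.StdLogger.EnableDebug(true)
        log.Debugln("now visible via the package-level wrapper")
    }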
diff --git a/log/s3.go b/log/s3.go
deleted file mode 100644
index b6c298b11..000000000
--- a/log/s3.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package log
-
-// S3Logger is a logger compatible with the S3 package
-type S3Logger struct{}
-
-// Log logs a debug message
-func (l *S3Logger) Log(args ...interface{}) {
-	Debugln(args...)
-}
diff --git a/s3/s3.go b/s3/s3.go
index 0afba270e..8c3360729 100644
--- a/s3/s3.go
+++ b/s3/s3.go
@@ -8,8 +8,6 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/s3/s3manager"
-
-	"github.com/simplesurance/baur/log"
 )

 // Client is a S3 uploader client
@@ -18,6 +16,13 @@ type Client struct {
 	uploader *s3manager.Uploader
 }

+// Logger defines the interface for an S3 logger
+type Logger interface {
+	Debugf(format string, v ...interface{})
+	Debugln(v ...interface{})
+	DebugEnabled() bool
+}
+
 // DefaultRetries is the number of retries for a S3 upload until an error is
 // raised
 const DefaultRetries = 3
@@ -25,16 +30,20 @@ const DefaultRetries = 3
 // NewClient returns a new S3 Client, configuration is read from env variables
 // or configuration files,
 // see https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html
-func NewClient() (*Client, error) {
+func NewClient(logger Logger) (*Client, error) {
+	loglvl := aws.LogLevel(aws.LogOff)
+	if logger.DebugEnabled() {
+		loglvl = aws.LogLevel(aws.LogDebug)
+	}
+
 	cfg := aws.Config{
-		Logger:           &log.S3Logger{},
-		LogLevel:         aws.LogLevel(aws.LogDebug),
+		Logger:           aws.LoggerFunc(logger.Debugln),
+		LogLevel:         loglvl,
 		MaxRetries:       aws.Int(DefaultRetries),
 		S3ForcePathStyle: aws.Bool(true),
 	}

 	sess, err := session.NewSession(&cfg)
-
 	if err != nil {
 		return nil, err
 	}
@@ -84,7 +93,6 @@ func (c *Client) Upload(file string, dest string) (string, error) {
 	if err != nil {
 		return "", err
 	}
-
 	defer f.Close()

 	res, err := c.uploader.Upload(&s3manager.UploadInput{
@@ -92,7 +100,6 @@
 		Key:  aws.String(fileFromURL(url)),
 		Body: f,
 	})
-
 	if err != nil {
 		return "", err
 	}
diff --git a/upload/seq/manager.go b/upload/seq/manager.go
index eec81eb71..35c60b091 100644
--- a/upload/seq/manager.go
+++ b/upload/seq/manager.go
@@ -10,10 +10,14 @@ import (

 	"github.com/pkg/errors"

-	"github.com/simplesurance/baur/log"
 	"github.com/simplesurance/baur/upload"
 )

+// Logger defines the logger interface
+type Logger interface {
+	Debugf(format string, v ...interface{})
+}
+
 // Uploader is a sequential uploader
 type Uploader struct {
 	s3 upload.S3Uploader
@@ -22,12 +26,14 @@ type Uploader struct {
 	queue          []upload.Job
 	stopProcessing bool
 	statusChan     chan<- *upload.Result
+	logger         Logger
 }

 // New initializes a sequential uploader
 // Status chan must have a buffer count > 1 otherwise a deadlock occurs
-func New(s3Uploader upload.S3Uploader, dockerUploader upload.DockerUploader, status chan<- *upload.Result) *Uploader {
+func New(logger Logger, s3Uploader upload.S3Uploader, dockerUploader upload.DockerUploader, status chan<- *upload.Result) *Uploader {
 	return &Uploader{
+		logger:     logger,
 		s3:         s3Uploader,
 		statusChan: status,
 		lock:       sync.Mutex{},
@@ -62,7 +68,7 @@ func (u *Uploader) Start() {
 			var url string
 			startTs := time.Now()

-			log.Debugf("uploading %s\n", job)
+			u.logger.Debugf("uploading %s", job)
 			if job.Type() == upload.JobS3 {
 				url, err = u.s3.Upload(job.LocalPath(), job.RemoteDest())
 				if err != nil {
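Editor's note, not part of the diff: s3 and upload/seq now declare their own minimal Logger interfaces instead of depending on baur's log package; the *log.Logger methods shown above (Debugf, Debugln, DebugEnabled) satisfy both, which the compile-time assertions in this sketch would verify:

    package main

    import (
        "github.com/simplesurance/baur/log"
        "github.com/simplesurance/baur/s3"
        sequploader "github.com/simplesurance/baur/upload/seq"
    )

    // Compile-time checks: the concrete *log.Logger provides Debugf, Debugln
    // and DebugEnabled, so it can be passed to s3.NewClient and sequploader.New.
    var (
        _ s3.Logger          = (*log.Logger)(nil)
        _ sequploader.Logger = (*log.Logger)(nil)
    )

    func main() {
        // Constructing the S3 client with the shared logger, as the
        // command package does after this change.
        if _, err := s3.NewClient(log.StdLogger); err != nil {
            log.Fatalln(err)
        }
    }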