diff --git a/app.go b/app.go index 027c00365..55cc34fca 100644 --- a/app.go +++ b/app.go @@ -17,6 +17,7 @@ import ( "github.com/simplesurance/baur/resolve/gitpath" "github.com/simplesurance/baur/resolve/glob" "github.com/simplesurance/baur/resolve/gosource" + "github.com/simplesurance/baur/upload/scheduler" ) // App represents an application @@ -71,20 +72,50 @@ func (a *App) setDockerOutputsFromCfg(cfg *cfg.App) error { func (a *App) setFileOutputsFromCFG(cfg *cfg.App) error { for _, f := range cfg.Build.Output.File { - destFile, err := replaceGitCommitVar(f.S3Upload.DestFile, a.Repository) - if err != nil { - return errors.Wrap(err, "replacing $GITCOMMIT in dest_file failed") + if !f.S3Upload.IsEmpty() { + destFile, err := replaceGitCommitVar(f.S3Upload.DestFile, a.Repository) + if err != nil { + return errors.Wrap(err, "replacing $GITCOMMIT in dest_file failed") + } + + destFile = replaceUUIDvar(replaceAppNameVar(destFile, a.Name)) + url := "s3://" + f.S3Upload.Bucket + "/" + destFile + + src := path.Join(a.Path, f.Path) + + a.Outputs = append(a.Outputs, &FileArtifact{ + RelPath: path.Join(a.RelPath, f.Path), + Path: src, + DestFile: destFile, + UploadURL: url, + uploadJob: &scheduler.S3Job{ + DestURL: url, + FilePath: src, + }, + }) } - destFile = replaceUUIDvar(replaceAppNameVar(destFile, a.Name)) - url := "s3://" + f.S3Upload.Bucket + "/" + destFile + if !f.FileCopy.IsEmpty() { + dest, err := replaceGitCommitVar(f.FileCopy.Path, a.Repository) + if err != nil { + return errors.Wrap(err, "replacing $GITCOMMIT in path failed") + } + + dest = replaceUUIDvar(replaceAppNameVar(dest, a.Name)) + src := path.Join(a.Path, f.Path) + + a.Outputs = append(a.Outputs, &FileArtifact{ + RelPath: path.Join(a.RelPath, f.Path), + Path: src, + DestFile: dest, + UploadURL: dest, + uploadJob: &scheduler.FileCopyJob{ + Src: src, + Dst: dest, + }, + }) - a.Outputs = append(a.Outputs, &FileArtifact{ - RelPath: path.Join(a.RelPath, f.Path), - Path: path.Join(a.Path, f.Path), - DestFile: 
destFile, - UploadURL: url, - }) + } } return nil diff --git a/cfg/app.go b/cfg/app.go index 101311041..961bdf07f 100644 --- a/cfg/app.go +++ b/cfg/app.go @@ -57,6 +57,12 @@ type BuildOutput struct { type FileOutput struct { Path string `toml:"path" comment:"Path relative to the application directory" commented:"true"` S3Upload S3Upload `comment:"S3 location where the file is uploaded to"` + FileCopy FileCopy +} + +// FileCopy describes where a file artifact should be copied to +type FileCopy struct { + Path string `toml:"path" comment:"Path where the file copied to" commented:"true"` } // DockerImageRegistryUpload holds information about where the docker image @@ -105,6 +111,10 @@ func ExampleApp(name string) *App { Bucket: "go-artifacts/", DestFile: "$APPNAME-worker-$GITCOMMIT.tar.xz", }, + FileCopy: FileCopy{ + + Path: "/mnt/fileserver/build_artifacts/$APPNAME-$GITCOMMIT.tar.xz", + }, }, &FileOutput{ Path: fmt.Sprintf("dist/%s.tar.xz", name), @@ -277,9 +287,14 @@ func (b *BuildOutput) Validate() error { return nil } +// IsEmpty returns true if FileCopy is empty +func (f *FileCopy) IsEmpty() bool { + return len(f.Path) == 0 +} + // IsEmpty returns true if FileOutput is empty func (f *FileOutput) IsEmpty() bool { - return len(f.Path) == 0 && f.S3Upload.IsEmpty() + return f.FileCopy.IsEmpty() && f.S3Upload.IsEmpty() } // IsEmpty returns true if S3Upload is empty diff --git a/command/build.go b/command/build.go index 2edca04b2..a32b5c164 100644 --- a/command/build.go +++ b/command/build.go @@ -19,6 +19,7 @@ import ( "github.com/simplesurance/baur/storage" "github.com/simplesurance/baur/term" "github.com/simplesurance/baur/upload/docker" + "github.com/simplesurance/baur/upload/filecopy" "github.com/simplesurance/baur/upload/s3" "github.com/simplesurance/baur/upload/scheduler" sequploader "github.com/simplesurance/baur/upload/scheduler/seq" @@ -142,10 +143,15 @@ func resultAddUploadResult(appName string, ar baur.BuildOutput, r *scheduler.Res 
log.Fatalf("resultAddUploadResult: %q does not exist in build result map", appName) } - if r.Job.Type() == scheduler.JobDocker { + switch r.Job.Type() { + case scheduler.JobDocker: arType = storage.DockerArtifact - } else if r.Job.Type() == scheduler.JobS3 { + case scheduler.JobFileCopy: + fallthrough + case scheduler.JobS3: arType = storage.FileArtifact + default: + panic(fmt.Sprintf("unknown job type %v", r.Job.Type())) } artDigest, err := ar.Digest() @@ -284,7 +290,9 @@ func startBGUploader(outputCnt int, uploadChan chan *scheduler.Result) scheduler log.Fatalln(err) } - uploader := sequploader.New(log.StdLogger, s3Uploader, dockerUploader, uploadChan) + filecopyUploader := filecopy.New(log.Debugf) + + uploader := sequploader.New(log.StdLogger, filecopyUploader, s3Uploader, dockerUploader, uploadChan) outputBackends.DockerClt = dockerUploader diff --git a/fileartifact.go b/fileartifact.go index fc23acde8..41e737259 100644 --- a/fileartifact.go +++ b/fileartifact.go @@ -4,7 +4,7 @@ import ( "github.com/simplesurance/baur/digest" "github.com/simplesurance/baur/digest/sha384" "github.com/simplesurance/baur/fs" - "github.com/simplesurance/baur/upload" + "github.com/simplesurance/baur/upload/scheduler" ) // FileArtifact is a file build artifact @@ -13,6 +13,7 @@ type FileArtifact struct { Path string DestFile string UploadURL string + uploadJob scheduler.Job } // Exists returns true if the artifact exist @@ -26,11 +27,8 @@ func (f *FileArtifact) String() string { } // UploadJob returns a upload.DockerJob for the artifact -func (f *FileArtifact) UploadJob() (upload.Job, error) { - return &upload.S3Job{ - DestURL: f.UploadURL, - FilePath: f.Path, - }, nil +func (f *FileArtifact) UploadJob() (scheduler.Job, error) { + return f.uploadJob, nil } // LocalPath returns the local path to the artifact diff --git a/fs/fs.go b/fs/fs.go index a0d7f6370..f975f6348 100644 --- a/fs/fs.go +++ b/fs/fs.go @@ -46,6 +46,7 @@ func DirsExist(paths []string) error { } // IsDir returns true 
if the path is a directory. +// If the directory does not exist, the error from os.Stat() is returned. func IsDir(path string) (bool, error) { fi, err := os.Stat(path) if err != nil { @@ -55,6 +56,33 @@ return fi.IsDir(), nil } +// IsRegularFile returns true if path is a regular file. +// If the file does not exist, the error from os.Stat() is returned. +func IsRegularFile(path string) (bool, error) { + fi, err := os.Stat(path) + if err != nil { + return false, err + } + + return fi.Mode().IsRegular(), nil +} + +// SameFile calls os.SameFile(); if one of the files does not exist, the error +// from os.Stat() is returned. +func SameFile(a, b string) (bool, error) { + aFi, err := os.Stat(a) + if err != nil { + return false, err + } + + bFi, err := os.Stat(b) + if err != nil { + return false, err + } + + return os.SameFile(aFi, bFi), nil +} + // FindFileInParentDirs finds a directory that contains filename. The function // starts searching in startPath and then checks recursively each parent // directory for the file. 
It returns the absolute path to the first found diff --git a/upload/filecopy/filecopy.go b/upload/filecopy/filecopy.go new file mode 100644 index 000000000..27e922822 --- /dev/null +++ b/upload/filecopy/filecopy.go @@ -0,0 +1,108 @@ +package filecopy + +import ( + "io" + "os" + "path" + + "github.com/pkg/errors" + + "github.com/simplesurance/baur/fs" +) + +var defLogFn = func(string, ...interface{}) {} + +// Client copies files from one path to another +type Client struct { + debugLogFn func(string, ...interface{}) +} + +// New returns a client +func New(debugLogFn func(string, ...interface{})) *Client { + logFn := defLogFn + if debugLogFn != nil { + logFn = debugLogFn + } + + return &Client{debugLogFn: logFn} +} + +// copyFile copies src to dst, creating or truncating dst with src's +// permission bits. File descriptors are closed on every error path; the +// close error of the written file is returned so write failures surface. +func copyFile(src, dst string) error { + srcFd, err := os.Open(src) + if err != nil { + return errors.Wrapf(err, "opening %s failed", src) + } + defer srcFd.Close() + + srcFi, err := srcFd.Stat() + if err != nil { + return errors.Wrapf(err, "stat %s failed", src) + } + + srcFileMode := srcFi.Mode().Perm() + + dstFd, err := os.OpenFile(dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, srcFileMode) + if err != nil { + return errors.Wrapf(err, "opening %s failed", dst) + } + + if _, err := io.Copy(dstFd, srcFd); err != nil { + dstFd.Close() + return err + } + + return dstFd.Close() +} + +// Upload copies the file with src path to the dst path. +// If the destination directory does not exist, it is created. +// If the destination path exists and is not a regular file an error is returned. +// If it exists and is a file, the file is overwritten if it's not the same. 
+func (c *Client) Upload(src string, dst string) (string, error) { + destDir := path.Dir(dst) + + isDir, err := fs.IsDir(destDir) + if err != nil { + if !os.IsNotExist(err) { + return "", err + } + + err = fs.Mkdir(destDir) + if err != nil { + return "", errors.Wrapf(err, "creating directory '%s' failed", destDir) + } + + c.debugLogFn("filecopy: created directory '%s'", destDir) + } else { + if !isDir { + return "", errors.Errorf("%s is not a directory", destDir) + } + } + + regFile, err := fs.IsRegularFile(dst) + if err != nil { + if !os.IsNotExist(err) { + return "", err + } + + return dst, copyFile(src, dst) + } + + if !regFile { + return "", errors.Errorf("'%s' exists but is not a regular file", dst) + } + + sameFile, err := fs.SameFile(src, dst) + if err != nil { + return "", err + } + + if sameFile { + c.debugLogFn("filecopy: '%s' already exists and is the same as '%s'", dst, src) + return dst, nil + } + + c.debugLogFn("filecopy: '%s' already exists, overwriting file", dst) + + return dst, copyFile(src, dst) +} diff --git a/upload/scheduler/job.go b/upload/scheduler/job.go index 9deb0fb00..c770b4435 100644 --- a/upload/scheduler/job.go +++ b/upload/scheduler/job.go @@ -9,6 +9,8 @@ const ( JobS3 // JobDocker is the type for Docker container uploader jobs JobDocker + // JobFileCopy is a job for copying files from one place to another + JobFileCopy ) // Job is the interface for upload jobs diff --git a/upload/scheduler/seq/manager.go b/upload/scheduler/seq/manager.go index d99d5c76f..01208cd92 100644 --- a/upload/scheduler/seq/manager.go +++ b/upload/scheduler/seq/manager.go @@ -20,6 +20,7 @@ type Logger interface { // Uploader is a sequential uploader type Uploader struct { + filecopy upload.Uploader s3 upload.Uploader docker upload.Uploader lock sync.Mutex @@ -31,7 +32,7 @@ type Uploader struct { // New initializes a sequential uploader // Status chan must have a buffer count > 1 otherwise a deadlock occurs -func New(logger Logger, s3Uploader, 
dockerUploader upload.Uploader, status chan<- *scheduler.Result) *Uploader { +func New(logger Logger, filecopyUploader, s3Uploader, dockerUploader upload.Uploader, status chan<- *scheduler.Result) *Uploader { return &Uploader{ logger: logger, s3: s3Uploader, @@ -39,6 +40,7 @@ func New(logger Logger, s3Uploader, dockerUploader upload.Uploader, status chan< lock: sync.Mutex{}, queue: []scheduler.Job{}, docker: dockerUploader, + filecopy: filecopyUploader, } } @@ -69,17 +71,23 @@ func (u *Uploader) Start() { startTs := time.Now() u.logger.Debugf("uploading %s", job) - if job.Type() == scheduler.JobS3 { + switch job.Type() { + case scheduler.JobFileCopy: + url, err = u.filecopy.Upload(job.LocalPath(), job.RemoteDest()) + if err != nil { + err = errors.Wrap(err, "file copy failed") + } + case scheduler.JobS3: url, err = u.s3.Upload(job.LocalPath(), job.RemoteDest()) if err != nil { err = errors.Wrap(err, "S3 upload failed") } - } else if job.Type() == scheduler.JobDocker { + case scheduler.JobDocker: url, err = u.docker.Upload(job.LocalPath(), job.RemoteDest()) if err != nil { err = errors.Wrap(err, "Docker upload failed") } - } else { + default: panic(fmt.Sprintf("invalid job %+v", job)) }