From 98637eb2a70718f8b53775d601356e540d4e2c4c Mon Sep 17 00:00:00 2001 From: Kemal Akkoyun Date: Thu, 2 Apr 2020 19:12:05 +0200 Subject: [PATCH 01/16] Introduce new Storage layer --- cache/backend/azure.go | 50 ---- cache/backend/azure_test.go | 53 ---- cache/backend/backend.go | 259 ------------------ cache/backend/filesystem.go | 55 ---- cache/backend/filesystem_test.go | 9 - cache/backend/gcs.go | 63 ----- cache/backend/gcs_test.go | 9 - cache/backend/s3.go | 64 ----- cache/backend/s3_test.go | 9 - cache/backend/sftp.go | 48 ---- cache/backend/sftp_test.go | 65 ----- storage/backend/azure/azure.go | 125 +++++++++ storage/backend/azure/azure_test.go | 78 ++++++ storage/backend/azure/config.go | 14 + storage/backend/backend.go | 89 ++++++ storage/backend/config.go | 20 ++ storage/backend/filesystem/config.go | 6 + storage/backend/filesystem/filesystem.go | 117 ++++++++ storage/backend/filesystem/filesystem_test.go | 47 ++++ storage/backend/gcs/config.go | 14 + storage/backend/gcs/gcs.go | 182 ++++++++++++ storage/backend/gcs/gcs_test.go | 140 ++++++++++ storage/backend/s3/config.go | 31 +++ storage/backend/s3/s3.go | 116 ++++++++ storage/backend/s3/s3_test.go | 110 ++++++++ storage/backend/sftp/config.go | 28 ++ storage/backend/sftp/sftp.go | 161 +++++++++++ storage/backend/sftp/sftp_test.go | 83 ++++++ storage/storage.go | 71 +++++ 29 files changed, 1432 insertions(+), 684 deletions(-) delete mode 100644 cache/backend/azure.go delete mode 100644 cache/backend/azure_test.go delete mode 100644 cache/backend/backend.go delete mode 100644 cache/backend/filesystem.go delete mode 100644 cache/backend/filesystem_test.go delete mode 100644 cache/backend/gcs.go delete mode 100644 cache/backend/gcs_test.go delete mode 100644 cache/backend/s3.go delete mode 100644 cache/backend/s3_test.go delete mode 100644 cache/backend/sftp.go delete mode 100644 cache/backend/sftp_test.go create mode 100644 storage/backend/azure/azure.go create mode 100644 storage/backend/azure/azure_test.go create mode 100644 storage/backend/azure/config.go create mode 100644 storage/backend/backend.go create mode 100644 storage/backend/config.go create mode 100644 storage/backend/filesystem/config.go create mode 100644 storage/backend/filesystem/filesystem.go create mode 100644 storage/backend/filesystem/filesystem_test.go create mode 100644 storage/backend/gcs/config.go create mode 100644 storage/backend/gcs/gcs.go create mode 100644 storage/backend/gcs/gcs_test.go create mode 100644 storage/backend/s3/config.go create mode 100644 storage/backend/s3/s3.go create mode 100644 storage/backend/s3/s3_test.go create mode 100644 storage/backend/sftp/config.go create mode 100644 storage/backend/sftp/sftp.go create mode 100644 storage/backend/sftp/sftp_test.go create mode 100644 storage/storage.go diff --git a/cache/backend/azure.go b/cache/backend/azure.go deleted file mode 100644 index fd772597..00000000 --- a/cache/backend/azure.go +++ /dev/null @@ -1,50 +0,0 @@ -package backend - -import ( - "context" - "fmt" - "io" - - "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/meltwater/drone-cache/cache" -) - -type azureBackend struct { - containerURL azblob.ContainerURL - ctx context.Context -} - -func newAzure(ctx context.Context, containerURL azblob.ContainerURL) cache.Backend { - return &azureBackend{ - containerURL: containerURL, - ctx: ctx, - } -} - -func (c *azureBackend) Get(p string) (io.ReadCloser, error) { - blobURL := c.containerURL.NewBlockBlobURL(p) - - downloadResponse, err := blobURL.Download(c.ctx, 0, 
azblob.CountToEnd, azblob.BlobAccessConditions{}, false) - if err != nil { - return nil, fmt.Errorf("get the object %w", err) - } - - //nolint:mnd // NOTE: automatically retries are performed if the connection fails, not magic number - bodyStream := downloadResponse.Body(azblob.RetryReaderOptions{MaxRetryRequests: 4}) - - return bodyStream, nil -} - -// Put uploads the contents of the io.ReadSeeker -func (c *azureBackend) Put(p string, src io.ReadSeeker) error { - blobURL := c.containerURL.NewBlockBlobURL(p) - - fmt.Printf("uploading the file with blob name: %s\n", p) - - _, err := blobURL.Upload(c.ctx, src, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{}) - if err != nil { - return fmt.Errorf("put the object %w", err) - } - - return nil -} diff --git a/cache/backend/azure_test.go b/cache/backend/azure_test.go deleted file mode 100644 index 70b2835f..00000000 --- a/cache/backend/azure_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package backend - -import ( - "bytes" - "io/ioutil" - "math/rand" - "testing" - - "github.com/go-kit/kit/log" -) - -const defaultBlobStorageURL = "127.0.0.1:10000" - -var blobURL = getEnv("TEST_AZURITE_URL", defaultBlobStorageURL) - -func TestAzureTruth(t *testing.T) { - - b, err := InitializeAzureBackend(log.NewNopLogger(), - AzureConfig{ - AccountName: "devstoreaccount1", - AccountKey: "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==", - ContainerName: "testcontainer", - BlobStorageURL: blobURL, - Azurite: true, - }, true) - if err != nil { - t.Fatal(err) - } - - token := make([]byte, 32) - rand.Read(token) - testData := bytes.NewReader(token) - - // PUT TEST - err = b.Put("test_key", testData) - if err != nil { - t.Fatal(err) - } - - // GET TEST - readCloser, err := b.Get("test_key") - if err != nil { - t.Fatal(err) - } - - // Check the validity of returned bytes - readData, _ := ioutil.ReadAll(readCloser) - - if !bytes.Equal(readData, token) { - t.Fatal(string(readData), "!=", token) - } - -} diff --git a/cache/backend/backend.go b/cache/backend/backend.go deleted file mode 100644 index 829faf43..00000000 --- a/cache/backend/backend.go +++ /dev/null @@ -1,259 +0,0 @@ -package backend - -import ( - "context" - "errors" - "fmt" - "io/ioutil" - "net/url" - "os" - "path" - "strings" - - "github.com/meltwater/drone-cache/cache" - - "github.com/Azure/azure-storage-blob-go/azblob" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" - "github.com/pkg/sftp" - "golang.org/x/crypto/ssh" - "google.golang.org/api/option" -) - -// S3Config is a structure to store S3 backend configuration -type S3Config struct { - // Indicates the files ACL, which should be one - // of the following: - // private - // public-read - // public-read-write - // authenticated-read - // bucket-owner-read - // bucket-owner-full-control - ACL string - Bucket string - Encryption string // if not "", enables server-side encryption. valid values are: AES256, aws:kms - Endpoint string - Key string - - // us-east-1 - // us-west-1 - // us-west-2 - // eu-west-1 - // ap-southeast-1 - // ap-southeast-2 - // ap-northeast-1 - // sa-east-1 - Region string - Secret string - - PathStyle bool // Use path style instead of domain style. 
Should be true for minio and false for AWS -} - -// AzureConfig is a structure to store Azure backend configuration -type AzureConfig struct { - AccountName string - AccountKey string - ContainerName string - BlobStorageURL string - Azurite bool -} - -// FileSystemConfig is a structure to store filesystem backend configuration -type FileSystemConfig struct { - CacheRoot string -} - -// InitializeS3Backend creates an S3 backend -func InitializeS3Backend(l log.Logger, c S3Config, debug bool) (cache.Backend, error) { - awsConf := &aws.Config{ - Region: aws.String(c.Region), - Endpoint: &c.Endpoint, - DisableSSL: aws.Bool(!strings.HasPrefix(c.Endpoint, "https://")), - S3ForcePathStyle: aws.Bool(c.PathStyle), - } - - if c.Key != "" && c.Secret != "" { - awsConf.Credentials = credentials.NewStaticCredentials(c.Key, c.Secret, "") - } else { - level.Warn(l).Log("msg", "aws key and/or Secret not provided (falling back to anonymous credentials)") - } - - level.Debug(l).Log("msg", "s3 backend", "config", fmt.Sprintf("%+v", c)) - - if debug { - awsConf.WithLogLevel(aws.LogDebugWithHTTPBody) - } - - return newS3(c.Bucket, c.ACL, c.Encryption, awsConf), nil -} - -// InitializeAzureBackend creates an AzureBlob backend -func InitializeAzureBackend(l log.Logger, c AzureConfig, debug bool) (cache.Backend, error) { - // From the Azure portal, get your storage account name and key and set environment variables. - accountName, accountKey := c.AccountName, c.AccountKey - if len(accountName) == 0 || len(accountKey) == 0 { - return nil, fmt.Errorf("either the AZURE_ACCOUNT_NAME or AZURE_ACCOUNT_KEY environment variable is not set") - } - - // Create a default request pipeline using your storage account name and account key. - credential, err := azblob.NewSharedKeyCredential(accountName, accountKey) - if err != nil { - level.Error(l).Log("msg", "invalid credentials with error: "+err.Error()) - } - - var azureBlobURL *url.URL - - // Azurite has different URL pattern than production Azure Blob Storage - if c.Azurite { - azureBlobURL, err = url.Parse(fmt.Sprintf("http://%s/%s/%s", c.BlobStorageURL, c.AccountName, c.ContainerName)) - } else { - azureBlobURL, err = url.Parse(fmt.Sprintf("https://%s.%s/%s", c.AccountName, c.BlobStorageURL, c.ContainerName)) - } - - if err != nil { - level.Error(l).Log("msg", "can't create url with : "+err.Error()) - } - - pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{}) - containerURL := azblob.NewContainerURL(*azureBlobURL, pipeline) - ctx := context.Background() - - // Always creating new container, it will throw error if it already exists - _, err = containerURL.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone) - if err != nil { - level.Debug(l).Log("msg", "container already exists:"+err.Error()) - } - - return newAzure(ctx, containerURL), nil -} - -// InitializeFileSystemBackend creates a filesystem backend -func InitializeFileSystemBackend(l log.Logger, c FileSystemConfig, debug bool) (cache.Backend, error) { - if strings.TrimRight(path.Clean(c.CacheRoot), "/") == "" { - return nil, fmt.Errorf("empty or root path given, <%s> as cache root, ", c.CacheRoot) - } - - if _, err := os.Stat(c.CacheRoot); err != nil { - return nil, fmt.Errorf("make sure volume is mounted, <%s> as cache root %w", c.CacheRoot, err) - } - - level.Debug(l).Log("msg", "filesystem backend", "config", fmt.Sprintf("%+v", c)) - - return newFileSystem(c.CacheRoot), nil -} - -type SSHAuthMethod string - -const ( - SSHAuthMethodPassword SSHAuthMethod = "PASSWORD" - SSHAuthMethodPublicKeyFile 
SSHAuthMethod = "PUBLIC_KEY_FILE" -) - -type SSHAuth struct { - Password string - PublicKeyFile string - Method SSHAuthMethod -} - -// SFTPConfig is a structure to store sftp backend configuration -type SFTPConfig struct { - CacheRoot string - Username string - Host string - Port string - Auth SSHAuth -} - -func InitializeSFTPBackend(l log.Logger, c SFTPConfig, debug bool) (cache.Backend, error) { - sshClient, err := getSSHClient(c) - if err != nil { - return nil, err - } - - sftpClient, err := sftp.NewClient(sshClient) - if err != nil { - return nil, fmt.Errorf("unable to connect to ssh with sftp protocol %w", err) - } - - level.Debug(l).Log("msg", "sftp backend", "config", fmt.Sprintf("%+v", c)) - - return newSftpBackend(sftpClient, c.CacheRoot), nil -} - -func getSSHClient(c SFTPConfig) (*ssh.Client, error) { - authMethod, err := getAuthMethod(c) - if err != nil { - return nil, fmt.Errorf("unable to get ssh auth method %w", err) - } - - /* #nosec */ - client, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", c.Host, c.Port), &ssh.ClientConfig{ - User: c.Username, - Auth: authMethod, - HostKeyCallback: ssh.InsecureIgnoreHostKey(), // #nosec just a workaround for now, will fix - }) - if err != nil { - return nil, fmt.Errorf("unable to connect to ssh %w", err) - } - - return client, nil -} - -func getAuthMethod(c SFTPConfig) ([]ssh.AuthMethod, error) { - if c.Auth.Method == SSHAuthMethodPassword { - return []ssh.AuthMethod{ - ssh.Password(c.Auth.Password), - }, nil - } else if c.Auth.Method == SSHAuthMethodPublicKeyFile { - pkAuthMethod, err := readPublicKeyFile(c.Auth.PublicKeyFile) - return []ssh.AuthMethod{ - pkAuthMethod, - }, err - } - - return nil, errors.New("ssh method auth is not recognized, should be PASSWORD or PUBLIC_KEY_FILE") -} - -func readPublicKeyFile(file string) (ssh.AuthMethod, error) { - buffer, err := ioutil.ReadFile(file) - if err != nil { - return nil, fmt.Errorf("unable to read file <%s> %w", file, err) - } - - key, err := ssh.ParsePrivateKey(buffer) - if err != nil { - return nil, fmt.Errorf("unable to parse private key %w", err) - } - - return ssh.PublicKeys(key), nil -} - -// CloudStorageConfig is a structure to store Cloud Storage backend configuration -type CloudStorageConfig struct { - Bucket string - ACL string - Encryption string - Endpoint string - APIKey string -} - -// InitializeGCSBackend creates a Cloud Storage backend -func InitializeGCSBackend(l log.Logger, c CloudStorageConfig, debug bool) (cache.Backend, error) { - var opts []option.ClientOption - if c.APIKey != "" { - opts = append(opts, option.WithAPIKey(c.APIKey)) - } - - if c.Endpoint != "" { - opts = append(opts, option.WithEndpoint(c.Endpoint)) - } - - if debug { - level.Debug(l).Log("msg", "gc storage backend", "config", fmt.Sprintf("%+v", c)) - } - - return newGCS(c.Bucket, c.ACL, c.Encryption, opts...) 
-} diff --git a/cache/backend/filesystem.go b/cache/backend/filesystem.go deleted file mode 100644 index df4238fb..00000000 --- a/cache/backend/filesystem.go +++ /dev/null @@ -1,55 +0,0 @@ -package backend - -import ( - "fmt" - "io" - "os" - "path/filepath" - - "github.com/meltwater/drone-cache/cache" -) - -// filesystem is an file system implementation of the Backend -type filesystem struct { - cacheRoot string -} - -// newFileSystem returns a new file system Backend implementation -func newFileSystem(cacheRoot string) cache.Backend { - return &filesystem{cacheRoot: cacheRoot} -} - -// Get returns an io.Reader for reading the contents of the file -func (c *filesystem) Get(p string) (io.ReadCloser, error) { - absPath, err := filepath.Abs(filepath.Clean(filepath.Join(c.cacheRoot, p))) - if err != nil { - return nil, fmt.Errorf("get the object %w", err) - } - - return os.Open(absPath) -} - -// Put uploads the contents of the io.ReadSeeker -func (c *filesystem) Put(p string, src io.ReadSeeker) error { - absPath, err := filepath.Abs(filepath.Clean(filepath.Join(c.cacheRoot, p))) - if err != nil { - return fmt.Errorf("build path %w", err) - } - - dir := filepath.Dir(absPath) - if err := os.MkdirAll(dir, os.FileMode(0755)); err != nil { //nolint:mnd 755 is not a magic number - return fmt.Errorf("create directory <%s> %w", dir, err) - } - - dst, err := os.Create(absPath) - if err != nil { - return fmt.Errorf("create cache file <%s> %w", absPath, err) - } - defer dst.Close() - - if _, err := io.Copy(dst, src); err != nil { - return fmt.Errorf("write read seeker as file %w", err) - } - - return nil -} diff --git a/cache/backend/filesystem_test.go b/cache/backend/filesystem_test.go deleted file mode 100644 index dc8d844e..00000000 --- a/cache/backend/filesystem_test.go +++ /dev/null @@ -1,9 +0,0 @@ -package backend - -import ( - "testing" -) - -func TestFileSystemTruth(t *testing.T) { - t.Skip("skipping backend package tests") -} diff --git a/cache/backend/gcs.go b/cache/backend/gcs.go deleted file mode 100644 index 4a483634..00000000 --- a/cache/backend/gcs.go +++ /dev/null @@ -1,63 +0,0 @@ -package backend - -import ( - "context" - "io" - - "github.com/meltwater/drone-cache/cache" - - "cloud.google.com/go/storage" - "google.golang.org/api/option" -) - -// gcsBackend is an Cloud Storage implementation of the Backend -type gcsBackend struct { - bucket string - acl string - encryption string - client *storage.Client -} - -// newGCS returns a new Cloud Storage remote Backend implemented -func newGCS(bucket, acl, encryption string, opts ...option.ClientOption) (cache.Backend, error) { - ctx := context.Background() - client, err := storage.NewClient(ctx, opts...) 
- - if err != nil { - return nil, err - } - - return &gcsBackend{ - bucket: bucket, - acl: acl, - encryption: encryption, - client: client, - }, nil -} - -// Get returns an io.Reader for reading the contents of the file -func (c *gcsBackend) Get(p string) (io.ReadCloser, error) { - bkt := c.client.Bucket(c.bucket) - obj := bkt.Object(p) - - if c.encryption != "" { - obj = obj.Key([]byte(c.encryption)) - } - - return obj.NewReader(context.TODO()) -} - -// Put uploads the contents of the io.ReadSeeker -func (c *gcsBackend) Put(p string, src io.ReadSeeker) error { - bkt := c.client.Bucket(c.bucket) - - obj := bkt.Object(p) - if c.encryption != "" { - obj = obj.Key([]byte(c.encryption)) - } - - w := obj.NewWriter(context.TODO()) - _, err := io.Copy(w, src) - - return err -} diff --git a/cache/backend/gcs_test.go b/cache/backend/gcs_test.go deleted file mode 100644 index c6215a5f..00000000 --- a/cache/backend/gcs_test.go +++ /dev/null @@ -1,9 +0,0 @@ -package backend - -import ( - "testing" -) - -func TestCloudStorageTruth(t *testing.T) { - t.Skip("skipping backend package tests") -} diff --git a/cache/backend/s3.go b/cache/backend/s3.go deleted file mode 100644 index 77b41a7d..00000000 --- a/cache/backend/s3.go +++ /dev/null @@ -1,64 +0,0 @@ -package backend - -import ( - "fmt" - "io" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/s3" - - "github.com/meltwater/drone-cache/cache" -) - -// s3Backend is an S3 implementation of the Backend -type s3Backend struct { - bucket string - acl string - encryption string - client *s3.S3 -} - -// newS3 returns a new S3 remote Backend implemented -func newS3(bucket, acl, encryption string, conf *aws.Config) cache.Backend { - client := s3.New(session.Must(session.NewSessionWithOptions(session.Options{})), conf) - - return &s3Backend{ - bucket: bucket, - acl: acl, - encryption: encryption, - client: client, - } -} - -// Get returns an io.Reader for reading the contents of the file -func (c *s3Backend) Get(p string) (io.ReadCloser, error) { - out, err := c.client.GetObject(&s3.GetObjectInput{ - Bucket: aws.String(c.bucket), - Key: aws.String(p), - }) - if err != nil { - return nil, fmt.Errorf("get the object %w", err) - } - - return out.Body, nil -} - -// Put uploads the contents of the io.ReadSeeker -func (c *s3Backend) Put(p string, src io.ReadSeeker) error { - in := &s3.PutObjectInput{ - Bucket: aws.String(c.bucket), - Key: aws.String(p), - ACL: aws.String(c.acl), - Body: src, - } - if c.encryption != "" { - in.ServerSideEncryption = aws.String(c.encryption) - } - - if _, err := c.client.PutObject(in); err != nil { - return fmt.Errorf("put the object %w", err) - } - - return nil -} diff --git a/cache/backend/s3_test.go b/cache/backend/s3_test.go deleted file mode 100644 index 0baec4ad..00000000 --- a/cache/backend/s3_test.go +++ /dev/null @@ -1,9 +0,0 @@ -package backend - -import ( - "testing" -) - -func TestS3Truth(t *testing.T) { - t.Skip("skipping backend package tests") -} diff --git a/cache/backend/sftp.go b/cache/backend/sftp.go deleted file mode 100644 index 554f501b..00000000 --- a/cache/backend/sftp.go +++ /dev/null @@ -1,48 +0,0 @@ -package backend - -import ( - "fmt" - "io" - "path/filepath" - - "github.com/pkg/sftp" -) - -type sftpBackend struct { - client *sftp.Client - cacheRoot string -} - -func newSftpBackend(client *sftp.Client, cacheRoot string) *sftpBackend { - return &sftpBackend{client: client, cacheRoot: cacheRoot} -} - -func (s sftpBackend) Get(path string) 
(io.ReadCloser, error) {
-	absPath, err := filepath.Abs(filepath.Clean(filepath.Join(s.cacheRoot, path)))
-	if err != nil {
-		return nil, fmt.Errorf("get the object %w", err)
-	}
-
-	return s.client.Open(absPath)
-}
-
-func (s sftpBackend) Put(path string, src io.ReadSeeker) error {
-	pathJoin := filepath.Join(s.cacheRoot, path)
-
-	dir := filepath.Dir(pathJoin)
-	if err := s.client.MkdirAll(dir); err != nil {
-		return fmt.Errorf("create directory <%s> %w", dir, err)
-	}
-
-	dst, err := s.client.Create(pathJoin)
-	if err != nil {
-		return fmt.Errorf("create cache file <%s> %w", pathJoin, err)
-	}
-	defer dst.Close()
-
-	if _, err := io.Copy(dst, src); err != nil {
-		return fmt.Errorf("write read seeker as file %w", err)
-	}
-
-	return nil
-}
diff --git a/cache/backend/sftp_test.go b/cache/backend/sftp_test.go
deleted file mode 100644
index 08b8aae1..00000000
--- a/cache/backend/sftp_test.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package backend
-
-import (
-	"bytes"
-	"io/ioutil"
-	"os"
-	"testing"
-
-	"github.com/go-kit/kit/log"
-)
-
-const defaultSFTPHost = "127.0.0.1"
-const defaultSFTPPort = "22"
-
-var host = getEnv("TEST_SFTP_HOST", defaultSFTPHost)
-var port = getEnv("TEST_SFTP_PORT", defaultSFTPPort)
-
-func TestSFTPTruth(t *testing.T) {
-	cli, err := InitializeSFTPBackend(log.NewNopLogger(),
-		SFTPConfig{
-			CacheRoot: "/upload",
-			Username:  "foo",
-			Auth: SSHAuth{
-				Password: "pass",
-				Method:   SSHAuthMethodPassword,
-			},
-			Host: host,
-			Port: port,
-		}, true)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	content := "Hello world4"
-
-	// PUT TEST
-	file, _ := os.Create("test")
-	_, _ = file.Write([]byte(content))
-	_, _ = file.Seek(0, 0)
-	err = cli.Put("test3.t", file)
-	if err != nil {
-		t.Fatal(err)
-	}
-	_ = file.Close()
-
-	// GET TEST
-	readCloser, err := cli.Get("test3.t")
-	if err != nil {
-		t.Fatal(err)
-	}
-	b, _ := ioutil.ReadAll(readCloser)
-	if !bytes.Equal(b, []byte(content)) {
-		t.Fatal(string(b), "!=", content)
-	}
-
-	_ = os.Remove("test")
-}
-
-func getEnv(key, defaultVal string) string {
-	value, ok := os.LookupEnv(key)
-	if !ok {
-		return defaultVal
-	}
-	return value
-}
diff --git a/storage/backend/azure/azure.go b/storage/backend/azure/azure.go
new file mode 100644
index 00000000..2be21b7e
--- /dev/null
+++ b/storage/backend/azure/azure.go
@@ -0,0 +1,125 @@
+package azure
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net/url"
+
+	"github.com/Azure/azure-storage-blob-go/azblob"
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"github.com/meltwater/drone-cache/internal"
+)
+
+const (
+	// DefaultBlobMaxRetryRequests is the default number of retries for failed blob download requests.
+	DefaultBlobMaxRetryRequests = 4
+
+	defaultBufferSize = 3 * 1024 * 1024
+	defaultMaxBuffers = 4
+)
+
+// Backend is an Azure Blob Storage implementation of the storage backend.
+type Backend struct {
+	logger log.Logger
+
+	cfg          Config
+	containerURL azblob.ContainerURL
+}
+
+// New creates an AzureBlob backend.
+func New(l log.Logger, c Config) (*Backend, error) {
+	// 1. From the Azure portal, get your storage account name and key and set environment variables.
+	if c.AccountName == "" || c.AccountKey == "" {
+		return nil, fmt.Errorf("either the AZURE_ACCOUNT_NAME or AZURE_ACCOUNT_KEY environment variable is not set")
+	}
+
+	// 2. Create a default request pipeline using your storage account name and account key.
+	credential, err := azblob.NewSharedKeyCredential(c.AccountName, c.AccountKey)
+	if err != nil {
+		return nil, fmt.Errorf("azure, invalid credentials %w", err)
+	}
+
+	// 3. Azurite has a different URL pattern than production Azure Blob Storage.
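+	//    A minimal illustration, assuming the defaults used by the tests in this package
+	//    and the standard production blob host (blob.core.windows.net):
+	//      Azurite:    http://127.0.0.1:10000/devstoreaccount1/testcontainer
+	//      production: https://devstoreaccount1.blob.core.windows.net/testcontainer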
+	var blobURL *url.URL
+	if c.Azurite {
+		blobURL, err = url.Parse(fmt.Sprintf("http://%s/%s/%s", c.BlobStorageURL, c.AccountName, c.ContainerName))
+	} else {
+		blobURL, err = url.Parse(fmt.Sprintf("https://%s.%s/%s", c.AccountName, c.BlobStorageURL, c.ContainerName))
+	}
+
+	if err != nil {
+		return nil, fmt.Errorf("azure, invalid blob URL %w", err)
+	}
+
+	pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{})
+	containerURL := azblob.NewContainerURL(*blobURL, pipeline)
+
+	// 4. Always try to create the container; the service returns an error if it already exists.
+	ctx, cancel := context.WithTimeout(context.Background(), c.Timeout)
+	defer cancel()
+
+	_, err = containerURL.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
+	if err != nil {
+		ret, ok := err.(azblob.StorageError)
+		if !ok {
+			return nil, fmt.Errorf("azure, unexpected error %w", err)
+		}
+
+		if ret.ServiceCode() != "ContainerAlreadyExists" {
+			return nil, fmt.Errorf("azure, create container %w", err)
+		}
+
+		level.Debug(l).Log("msg", "container already exists", "err", err)
+	}
+
+	return &Backend{logger: l, cfg: c, containerURL: containerURL}, nil
+}
+
+// Get writes downloaded content to the given writer.
+func (b *Backend) Get(ctx context.Context, p string, w io.Writer) (err error) {
+	errCh := make(chan error)
+
+	go func() {
+		defer close(errCh)
+
+		blobURL := b.containerURL.NewBlockBlobURL(p)
+
+		resp, err := blobURL.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false)
+		if err != nil {
+			errCh <- fmt.Errorf("get the object %w", err)
+			return
+		}
+
+		rc := resp.Body(azblob.RetryReaderOptions{MaxRetryRequests: b.cfg.MaxRetryRequests})
+		defer internal.CloseWithErrLogf(b.logger, rc, "response body, close defer")
+
+		_, err = io.Copy(w, rc)
+		if err != nil {
+			errCh <- fmt.Errorf("copy the object %w", err)
+		}
+	}()
+
+	select {
+	case err := <-errCh:
+		return err
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}
+
+// Put uploads contents of the given reader.
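+// It streams the reader to the block blob in defaultBufferSize chunks with up to
+// defaultMaxBuffers buffers in flight, so the payload never has to fit in memory at once.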
+func (b *Backend) Put(ctx context.Context, p string, r io.Reader) error {
+	level.Debug(b.logger).Log("msg", "uploading the file with blob", "name", p)
+
+	blobURL := b.containerURL.NewBlockBlobURL(p)
+	if _, err := azblob.UploadStreamToBlockBlob(ctx, r, blobURL,
+		azblob.UploadStreamToBlockBlobOptions{
+			BufferSize: defaultBufferSize,
+			MaxBuffers: defaultMaxBuffers,
+		},
+	); err != nil {
+		return fmt.Errorf("put the object %w", err)
+	}
+
+	return nil
+}
diff --git a/storage/backend/azure/azure_test.go b/storage/backend/azure/azure_test.go
new file mode 100644
index 00000000..626ac601
--- /dev/null
+++ b/storage/backend/azure/azure_test.go
@@ -0,0 +1,78 @@
+// +build integration
+
+package azure
+
+import (
+	"bytes"
+	"context"
+	"io/ioutil"
+	"os"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/go-kit/kit/log"
+	"github.com/meltwater/drone-cache/test"
+)
+
+const (
+	defaultBlobStorageURL = "127.0.0.1:10000"
+	defaultAccountName    = "devstoreaccount1"
+	defaultAccountKey     = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw=="
+	defaultContainerName  = "testcontainer"
+)
+
+var (
+	blobURL       = getEnv("TEST_AZURITE_URL", defaultBlobStorageURL)
+	accountName   = getEnv("TEST_ACCOUNT_NAME", defaultAccountName)
+	accountKey    = getEnv("TEST_ACCOUNT_KEY", defaultAccountKey)
+	containerName = getEnv("TEST_CONTAINER_NAME", defaultContainerName)
+)
+
+func TestRoundTrip(t *testing.T) {
+	t.Parallel()
+
+	backend, cleanUp := setup(t)
+	t.Cleanup(cleanUp)
+
+	content := "Hello world4"
+
+	// Test Put
+	test.Ok(t, backend.Put(context.TODO(), "test.t", strings.NewReader(content)))
+
+	// Test Get
+	var buf bytes.Buffer
+	test.Ok(t, backend.Get(context.TODO(), "test.t", &buf))
+
+	b, err := ioutil.ReadAll(&buf)
+	test.Ok(t, err)
+
+	test.Equals(t, []byte(content), b)
+}
+
+// Helpers
+
+func setup(t *testing.T) (*Backend, func()) {
+	b, err := New(
+		log.NewNopLogger(),
+		Config{
+			AccountName:    accountName,
+			AccountKey:     accountKey,
+			ContainerName:  containerName,
+			BlobStorageURL: blobURL,
+			Azurite:        true,
+			Timeout:        30 * time.Second,
+		},
+	)
+	test.Ok(t, err)

+	return b, func() {}
+}
+
+func getEnv(key, defaultVal string) string {
+	value, ok := os.LookupEnv(key)
+	if !ok {
+		return defaultVal
+	}
+	return value
+}
diff --git a/storage/backend/azure/config.go b/storage/backend/azure/config.go
new file mode 100644
index 00000000..f2025b65
--- /dev/null
+++ b/storage/backend/azure/config.go
@@ -0,0 +1,14 @@
+package azure
+
+import "time"
+
+// Config is a structure to store Azure backend configuration
+type Config struct {
+	AccountName      string
+	AccountKey       string
+	ContainerName    string
+	BlobStorageURL   string
+	Azurite          bool
+	MaxRetryRequests int
+	Timeout          time.Duration
+}
diff --git a/storage/backend/backend.go b/storage/backend/backend.go
new file mode 100644
index 00000000..d1401b02
--- /dev/null
+++ b/storage/backend/backend.go
@@ -0,0 +1,89 @@
+package backend
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"time"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+
+	"github.com/meltwater/drone-cache/storage/backend/azure"
+	"github.com/meltwater/drone-cache/storage/backend/filesystem"
+	"github.com/meltwater/drone-cache/storage/backend/gcs"
+	"github.com/meltwater/drone-cache/storage/backend/s3"
+	"github.com/meltwater/drone-cache/storage/backend/sftp"
+)
+
+const (
+	// Azure type of the corresponding backend represented as string constant.
+	Azure = "azure"
+	// FileSystem type of the corresponding backend represented as string constant.
+	FileSystem = "filesystem"
+	// GCS type of the corresponding backend represented as string constant.
+	GCS = "gcs"
+	// S3 type of the corresponding backend represented as string constant.
+	S3 = "s3"
+	// SFTP type of the corresponding backend represented as string constant.
+	SFTP = "sftp"
+)
+
+// NOTICE: FileEntry needs a better place.
+
+// FileEntry defines a single cache item.
+type FileEntry struct {
+	Path         string
+	Size         int64
+	LastModified time.Time
+}
+
+// Backend implements operations for caching files.
+type Backend interface {
+	// Get writes downloaded content to the given writer.
+	Get(ctx context.Context, p string, w io.Writer) error
+
+	// Put uploads contents of the given reader.
+	Put(ctx context.Context, p string, r io.Reader) error
+
+	// Implement me!
+	// List(ctx context.Context, p string) ([]FileEntry, error)
+
+	// Implement me!
+	// Delete(ctx context.Context, p string) error
+}
+
+// FromConfig creates a new Backend, initialized from the given configuration.
+func FromConfig(l log.Logger, backendType string, cfg Config) (Backend, error) {
+	var (
+		b   Backend
+		err error
+	)
+
+	switch backendType {
+	case Azure:
+		level.Warn(l).Log("msg", "using azure blob as backend")
+		b, err = azure.New(log.With(l, "backend", Azure), cfg.Azure)
+	case S3:
+		level.Warn(l).Log("msg", "using aws s3 as backend")
+		b, err = s3.New(log.With(l, "backend", S3), cfg.S3, cfg.Debug)
+	case GCS:
+		level.Warn(l).Log("msg", "using gc storage as backend")
+		b, err = gcs.New(log.With(l, "backend", GCS), cfg.GCS)
+	case FileSystem:
+		level.Warn(l).Log("msg", "using filesystem as backend")
+		b, err = filesystem.New(log.With(l, "backend", FileSystem), cfg.FileSystem)
+	case SFTP:
+		level.Warn(l).Log("msg", "using sftp as backend")
+		b, err = sftp.New(log.With(l, "backend", SFTP), cfg.SFTP)
+	default:
+		return nil, errors.New("unknown backend")
+	}
+
+	if err != nil {
+		return nil, fmt.Errorf("initialize backend %w", err)
+	}
+
+	return b, nil
+}
diff --git a/storage/backend/config.go b/storage/backend/config.go
new file mode 100644
index 00000000..ef40b4c6
--- /dev/null
+++ b/storage/backend/config.go
@@ -0,0 +1,20 @@
+package backend
+
+import (
+	"github.com/meltwater/drone-cache/storage/backend/azure"
+	"github.com/meltwater/drone-cache/storage/backend/filesystem"
+	"github.com/meltwater/drone-cache/storage/backend/gcs"
+	"github.com/meltwater/drone-cache/storage/backend/s3"
+	"github.com/meltwater/drone-cache/storage/backend/sftp"
+)
+
+// Config configures behavior of Backend.
+type Config struct {
+	Debug bool
+
+	S3         s3.Config
+	FileSystem filesystem.Config
+	SFTP       sftp.Config
+	Azure      azure.Config
+	GCS        gcs.Config
+}
diff --git a/storage/backend/filesystem/config.go b/storage/backend/filesystem/config.go
new file mode 100644
index 00000000..6a3cb850
--- /dev/null
+++ b/storage/backend/filesystem/config.go
@@ -0,0 +1,6 @@
+package filesystem
+
+// Config is a structure to store filesystem backend configuration
+type Config struct {
+	CacheRoot string
+}
diff --git a/storage/backend/filesystem/filesystem.go b/storage/backend/filesystem/filesystem.go
new file mode 100644
index 00000000..b9157723
--- /dev/null
+++ b/storage/backend/filesystem/filesystem.go
@@ -0,0 +1,117 @@
+package filesystem
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"os"
+	"path"
+	"path/filepath"
+	"strings"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"github.com/meltwater/drone-cache/internal"
+)
+
+const defaultFileMode = 0755
+
+// Backend is a file system implementation of the Backend.
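+// Reads and writes are performed in a goroutine guarded by an error channel, so the
+// caller returns as soon as its context is cancelled, even if a copy is still in flight
+// (see Get and Put below).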
+type Backend struct {
+	logger log.Logger
+
+	cacheRoot string
+}
+
+// New creates a new filesystem backend.
+func New(l log.Logger, c Config) (*Backend, error) {
+	if strings.TrimRight(path.Clean(c.CacheRoot), "/") == "" {
+		return nil, fmt.Errorf("empty or root path given, <%s> as cache root", c.CacheRoot)
+	}
+
+	//nolint: TODO(kakkoyun): Should it be created?
+	if _, err := os.Stat(c.CacheRoot); err != nil {
+		return nil, fmt.Errorf("make sure volume is mounted, <%s> as cache root %w", c.CacheRoot, err)
+	}
+
+	level.Debug(l).Log("msg", "filesystem backend", "config", fmt.Sprintf("%#v", c))
+
+	return &Backend{logger: l, cacheRoot: c.CacheRoot}, nil
+}
+
+// Get writes downloaded content to the given writer.
+func (b *Backend) Get(ctx context.Context, p string, w io.Writer) error {
+	path, err := filepath.Abs(filepath.Clean(filepath.Join(b.cacheRoot, p)))
+	if err != nil {
+		return fmt.Errorf("absolute path %w", err)
+	}
+
+	errCh := make(chan error)
+
+	go func() {
+		defer close(errCh)
+
+		rc, err := os.Open(path)
+		if err != nil {
+			errCh <- fmt.Errorf("get the object %w", err)
+			return
+		}
+
+		defer internal.CloseWithErrLogf(b.logger, rc, "response body, close defer")
+
+		_, err = io.Copy(w, rc)
+		if err != nil {
+			errCh <- fmt.Errorf("copy the object %w", err)
+		}
+	}()
+
+	select {
+	case err := <-errCh:
+		return err
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}
+
+// Put uploads contents of the given reader.
+func (b *Backend) Put(ctx context.Context, p string, r io.Reader) error {
+	path, err := filepath.Abs(filepath.Clean(filepath.Join(b.cacheRoot, p)))
+	if err != nil {
+		return fmt.Errorf("build path %w", err)
+	}
+
+	errCh := make(chan error)
+
+	go func() {
+		defer close(errCh)
+
+		dir := filepath.Dir(path)
+		if err := os.MkdirAll(dir, os.FileMode(defaultFileMode)); err != nil {
+			errCh <- fmt.Errorf("create directory %w", err)
+			return
+		}
+
+		w, err := os.Create(path)
+		if err != nil {
+			errCh <- fmt.Errorf("create cache file %w", err)
+			return
+		}
+
+		defer internal.CloseWithErrLogf(b.logger, w, "file writer, close defer")
+
+		if _, err := io.Copy(w, r); err != nil {
+			errCh <- fmt.Errorf("write contents of reader to a file %w", err)
+			return
+		}
+
+		if err := w.Close(); err != nil {
+			errCh <- fmt.Errorf("close the object %w", err)
+		}
+	}()
+
+	select {
+	case err := <-errCh:
+		return err
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}
diff --git a/storage/backend/filesystem/filesystem_test.go b/storage/backend/filesystem/filesystem_test.go
new file mode 100644
index 00000000..5bd6f56d
--- /dev/null
+++ b/storage/backend/filesystem/filesystem_test.go
@@ -0,0 +1,47 @@
+package filesystem
+
+import (
+	"bytes"
+	"context"
+	"io/ioutil"
+	"strings"
+	"testing"
+
+	"github.com/go-kit/kit/log"
+	"github.com/meltwater/drone-cache/test"
+)
+
+func TestRoundTrip(t *testing.T) {
+	t.Parallel()
+
+	backend, cleanUp := setup(t)
+	t.Cleanup(cleanUp)
+
+	content := "Hello world4"
+
+	// Test Put
+	test.Ok(t, backend.Put(context.TODO(), "test.t", strings.NewReader(content)))
+
+	// Test Get
+	var buf bytes.Buffer
+	test.Ok(t, backend.Get(context.TODO(), "test.t", &buf))
+
+	b, err := ioutil.ReadAll(&buf)
+	test.Ok(t, err)
+
+	test.Equals(t, []byte(content), b)
+}
+
+// Helpers
+
+func setup(t *testing.T) (*Backend, func()) {
+	dir, cleanUp := test.CreateTempDir(t, "filesystem-test")
+
+	b, err := New(
+		log.NewNopLogger(),
+		Config{CacheRoot: dir},
+	)
+	test.Ok(t, err)
+
+	return b, func() { cleanUp() }
+}
diff --git a/storage/backend/gcs/config.go b/storage/backend/gcs/config.go
new file mode 100644
index 00000000..72b3385c
--- /dev/null
+++ b/storage/backend/gcs/config.go
@@ -0,0 +1,14 @@
+package gcs
+
+import "time"
+
+// Config is a structure to store Cloud Storage backend configuration
+type Config struct {
+	Bucket     string
+	ACL        string
+	Encryption string
+	Endpoint   string
+	APIKey     string
+	JSONKey    string
+	Timeout    time.Duration
+}
diff --git a/storage/backend/gcs/gcs.go b/storage/backend/gcs/gcs.go
new file mode 100644
index 00000000..092a11a1
--- /dev/null
+++ b/storage/backend/gcs/gcs.go
@@ -0,0 +1,182 @@
+package gcs
+
+import (
+	"context"
+	"crypto/tls"
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+
+	"github.com/meltwater/drone-cache/internal"
+
+	gcstorage "cloud.google.com/go/storage"
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"golang.org/x/oauth2/google"
+	"google.golang.org/api/option"
+)
+
+// Backend is a Cloud Storage implementation of the Backend.
+type Backend struct {
+	logger log.Logger
+
+	bucket     string
+	acl        string
+	encryption string
+	client     *gcstorage.Client
+}
+
+// New creates a Google Cloud Storage backend.
+func New(l log.Logger, c Config) (*Backend, error) {
+	var opts []option.ClientOption
+
+	level.Debug(l).Log("msg", "gc storage backend", "config", fmt.Sprintf("%+v", c))
+
+	if c.Endpoint != "" {
+		opts = append(opts, option.WithEndpoint(c.Endpoint))
+	}
+
+	if c.Endpoint != "" && !strings.HasPrefix(c.Endpoint, "https://") { // A custom plain-HTTP endpoint is only used for mock tests.
+		opts = append(opts, option.WithHTTPClient(&http.Client{Transport: &http.Transport{
+			// Ignore unverified/expired SSL certificates for tests.
+			TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, //nolint:gosec
+		}}))
+	}
+
+	opts = setAuthenticationMethod(l, c, opts)
+
+	ctx, cancel := context.WithTimeout(context.Background(), c.Timeout)
+	defer cancel()
+
+	client, err := gcstorage.NewClient(ctx, opts...)
+	if err != nil {
+		return nil, fmt.Errorf("gcs client initialization %w", err)
+	}
+
+	return &Backend{
+		logger:     l,
+		bucket:     c.Bucket,
+		acl:        c.ACL,
+		encryption: c.Encryption,
+		client:     client,
+	}, nil
+}
+
+// Get writes downloaded content to the given writer.
+func (b *Backend) Get(ctx context.Context, p string, w io.Writer) error {
+	errCh := make(chan error)
+
+	go func() {
+		defer close(errCh)
+
+		bkt := b.client.Bucket(b.bucket)
+		obj := bkt.Object(p)
+
+		if b.encryption != "" {
+			obj = obj.Key([]byte(b.encryption))
+		}
+
+		r, err := obj.NewReader(ctx)
+		if err != nil {
+			errCh <- fmt.Errorf("get the object %w", err)
+			return
+		}
+
+		defer internal.CloseWithErrLogf(b.logger, r, "response body, close defer")
+
+		_, err = io.Copy(w, r)
+		if err != nil {
+			errCh <- fmt.Errorf("copy the object %w", err)
+		}
+	}()
+
+	select {
+	case err := <-errCh:
+		return err
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}
+
+// Put uploads contents of the given reader.
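+// A sketch of the write path: the object handle is (optionally) wrapped with the
+// customer-supplied encryption key, content is streamed through obj.NewWriter, and the
+// ACL is applied only after the writer has been closed successfully.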
+func (b *Backend) Put(ctx context.Context, p string, r io.Reader) error {
+	errCh := make(chan error)
+
+	go func() {
+		defer close(errCh)
+
+		bkt := b.client.Bucket(b.bucket)
+		obj := bkt.Object(p)
+
+		if b.encryption != "" {
+			obj = obj.Key([]byte(b.encryption))
+		}
+
+		w := obj.NewWriter(ctx)
+		defer internal.CloseWithErrLogf(b.logger, w, "object writer, close defer")
+
+		_, err := io.Copy(w, r)
+		if err != nil {
+			errCh <- fmt.Errorf("copy the object %w", err)
+			return
+		}
+
+		if err := w.Close(); err != nil {
+			errCh <- fmt.Errorf("close the object %w", err)
+			return
+		}
+
+		if b.acl != "" {
+			if err := obj.ACL().Set(ctx, gcstorage.AllAuthenticatedUsers, gcstorage.ACLRole(b.acl)); err != nil {
+				errCh <- fmt.Errorf("set ACL of the object %w", err)
+			}
+		}
+	}()
+
+	select {
+	case err := <-errCh:
+		return err
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}
+
+// Helpers
+
+func setAuthenticationMethod(l log.Logger, c Config, opts []option.ClientOption) []option.ClientOption {
+	if c.APIKey != "" {
+		opts = append(opts, option.WithAPIKey(c.APIKey))
+		return opts
+	}
+
+	creds, err := credentials(l, c)
+	if err == nil {
+		opts = append(opts, option.WithCredentials(creds))
+		return opts
+	}
+
+	level.Error(l).Log("msg", "gc storage credential", "err", err)
+	level.Warn(l).Log("msg", "initializing gcs without authentication")
+
+	opts = append(opts, option.WithoutAuthentication())
+
+	return opts
+}
+
+func credentials(l log.Logger, c Config) (*google.Credentials, error) {
+	ctx, cancel := context.WithTimeout(context.Background(), c.Timeout)
+	defer cancel()
+
+	creds, err := google.CredentialsFromJSON(ctx, []byte(c.JSONKey), gcstorage.ScopeFullControl)
+	if err == nil {
+		return creds, nil
+	}
+
+	level.Error(l).Log("msg", "gc storage credentials from JSON key", "err", err)
+
+	creds, err = google.FindDefaultCredentials(ctx, gcstorage.ScopeFullControl)
+	if err != nil {
+		return nil, err
+	}
+
+	return creds, nil
+}
diff --git a/storage/backend/gcs/gcs_test.go b/storage/backend/gcs/gcs_test.go
new file mode 100644
index 00000000..d81a2727
--- /dev/null
+++ b/storage/backend/gcs/gcs_test.go
@@ -0,0 +1,140 @@
+// +build integration
+
+package gcs
+
+import (
+	"bytes"
+	"context"
+	"crypto/tls"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"strings"
+	"testing"
+	"time"
+
+	gcstorage "cloud.google.com/go/storage"
+	"github.com/go-kit/kit/log"
+	"github.com/meltwater/drone-cache/test"
+	"google.golang.org/api/option"
+)
+
+const (
+	defaultEndpoint   = "http://127.0.0.1:4443/storage/v1/"
+	defaultPublicHost = "localhost:4443"
+	defaultApiKey     = ""
+	bucketName        = "gcs-round-trip"
+)
+
+var (
+	endpoint   = getEnv("TEST_GCS_ENDPOINT", defaultEndpoint)
+	apiKey     = getEnv("TEST_GCS_API_KEY", defaultApiKey)
+	publicHost = getEnv("TEST_STORAGE_EMULATOR_HOST", defaultPublicHost)
+)
+
+func TestRoundTrip(t *testing.T) {
+	t.Parallel()
+
+	backend, cleanUp := setup(t)
+	t.Cleanup(cleanUp)
+
+	content := "Hello world4"
+
+	// Test Put
+	test.Ok(t, backend.Put(context.TODO(), "test.txt", strings.NewReader(content)))
+
+	// Test Get
+	backend = getBackend(t, bucketName) // This weird env set/unset dance is needed to make it work with the GCP client.
+	var buf bytes.Buffer
+	test.Ok(t, backend.Get(context.Background(), "test.txt", &buf))
+
+	b, err := ioutil.ReadAll(&buf)
+	test.Ok(t, err)
+
+	test.Equals(t, []byte(content), b)
+}
+
+// Helpers
+
+func setup(t *testing.T) (*Backend, func()) {
+	client := newClient(t)
+	bucket := client.Bucket(bucketName)
+
+	test.Ok(t, bucket.Create(context.Background(), "drone-cache", &gcstorage.BucketAttrs{}))
+
+	return putBackend(t, bucketName), func() {
+		_ = os.Unsetenv("STORAGE_EMULATOR_HOST")
+		_ = bucket.Delete(context.Background())
+		_ = client.Close()
+	}
+}
+
+func putBackend(t *testing.T, bucketName string) *Backend {
+	b, err := New(
+		log.NewLogfmtLogger(os.Stdout),
+		Config{
+			Bucket:   bucketName,
+			Endpoint: endpoint,
+			APIKey:   apiKey,
+			Timeout:  30 * time.Second,
+		},
+	)
+	test.Ok(t, err)
+
+	return b
+}
+
+func getBackend(t *testing.T, bucketName string) *Backend {
+	// This weird env set/unset dance is needed to make it work with the GCP client.
+	if _, ok := os.LookupEnv("STORAGE_EMULATOR_HOST"); !ok {
+		test.Ok(t, os.Setenv("STORAGE_EMULATOR_HOST", publicHost))
+	}
+
+	b, err := New(
+		log.NewLogfmtLogger(os.Stdout),
+		Config{
+			Bucket:   bucketName,
+			Endpoint: endpoint,
+			APIKey:   apiKey,
+			Timeout:  30 * time.Second,
+		},
+	)
+	test.Ok(t, err)
+
+	_ = os.Unsetenv("STORAGE_EMULATOR_HOST")
+	return b
+}
+
+func newClient(t *testing.T) *gcstorage.Client {
+	var opts []option.ClientOption
+
+	if apiKey != "" {
+		opts = append(opts, option.WithAPIKey(apiKey))
+	} else {
+		opts = append(opts, option.WithoutAuthentication())
+	}
+
+	if endpoint != "" {
+		opts = append(opts, option.WithEndpoint(endpoint))
+	}
+
+	if !strings.HasPrefix(endpoint, "https://") {
+		opts = append(opts, option.WithHTTPClient(&http.Client{Transport: &http.Transport{
+			TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // ignore expired SSL certificates
+		}}))
+	}
+
+	client, err := gcstorage.NewClient(context.Background(), opts...)
+	test.Ok(t, err)
+
+	return client
+}
+
+func getEnv(key, defaultVal string) string {
+	value, ok := os.LookupEnv(key)
+	if !ok {
+		return defaultVal
+	}
+
+	return value
+}
diff --git a/storage/backend/s3/config.go b/storage/backend/s3/config.go
new file mode 100644
index 00000000..a71942b0
--- /dev/null
+++ b/storage/backend/s3/config.go
@@ -0,0 +1,31 @@
+package s3
+
+// Config is a structure to store S3 backend configuration
+type Config struct {
+	// Indicates the files ACL, which should be one
+	// of the following:
+	// private
+	// public-read
+	// public-read-write
+	// authenticated-read
+	// bucket-owner-read
+	// bucket-owner-full-control
+	ACL        string
+	Bucket     string
+	Encryption string // if not "", enables server-side encryption. valid values are: AES256, aws:kms
+	Endpoint   string
+	Key        string
+
+	// us-east-1
+	// us-west-1
+	// us-west-2
+	// eu-west-1
+	// ap-southeast-1
+	// ap-southeast-2
+	// ap-northeast-1
+	// sa-east-1
+	Region string
+	Secret string
+
+	PathStyle bool // Use path style instead of domain style. Should be true for minio and false for AWS.
+}
diff --git a/storage/backend/s3/s3.go b/storage/backend/s3/s3.go
new file mode 100644
index 00000000..ec397189
--- /dev/null
+++ b/storage/backend/s3/s3.go
@@ -0,0 +1,116 @@
+package s3
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/s3"
+	"github.com/aws/aws-sdk-go/service/s3/s3manager"
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"github.com/meltwater/drone-cache/internal"
+)
+
+// Backend is an AWS S3 implementation of the storage backend.
+type Backend struct {
+	logger log.Logger
+
+	bucket     string
+	acl        string
+	encryption string
+	client     *s3.S3
+}
+
+// New creates an S3 backend.
+func New(l log.Logger, c Config, debug bool) (*Backend, error) {
+	conf := &aws.Config{
+		Region:           aws.String(c.Region),
+		Endpoint:         &c.Endpoint,
+		DisableSSL:       aws.Bool(!strings.HasPrefix(c.Endpoint, "https://")),
+		S3ForcePathStyle: aws.Bool(c.PathStyle),
+	}
+
+	if c.Key != "" && c.Secret != "" {
+		conf.Credentials = credentials.NewStaticCredentials(c.Key, c.Secret, "")
+	} else {
+		level.Warn(l).Log("msg", "aws key and/or secret not provided (falling back to anonymous credentials)")
+	}
+
+	level.Debug(l).Log("msg", "s3 backend", "config", fmt.Sprintf("%#v", c))
+
+	if debug {
+		conf.WithLogLevel(aws.LogDebugWithHTTPBody)
+	}
+
+	client := s3.New(session.Must(session.NewSessionWithOptions(session.Options{})), conf)
+
+	return &Backend{
+		logger:     l,
+		bucket:     c.Bucket,
+		acl:        c.ACL,
+		encryption: c.Encryption,
+		client:     client,
+	}, nil
+}
+
+// Get writes downloaded content to the given writer.
+func (b *Backend) Get(ctx context.Context, p string, w io.Writer) error {
+	in := &s3.GetObjectInput{
+		Bucket: aws.String(b.bucket),
+		Key:    aws.String(p),
+	}
+
+	errCh := make(chan error)
+
+	go func() {
+		defer close(errCh)
+
+		out, err := b.client.GetObjectWithContext(ctx, in)
+		if err != nil {
+			errCh <- fmt.Errorf("get the object %w", err)
+			return
+		}
+
+		defer internal.CloseWithErrLogf(b.logger, out.Body, "response body, close defer")
+
+		_, err = io.Copy(w, out.Body)
+		if err != nil {
+			errCh <- fmt.Errorf("copy the object %w", err)
+		}
+	}()
+
+	select {
+	case err := <-errCh:
+		return err
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}
+
+// Put uploads contents of the given reader.
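+// Uploads go through s3manager.Uploader, which transparently switches to concurrent
+// multipart uploads for large payloads instead of buffering the entire reader in memory.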
+func (b *Backend) Put(ctx context.Context, p string, r io.Reader) error {
+	var (
+		uploader = s3manager.NewUploaderWithClient(b.client)
+		in       = &s3manager.UploadInput{
+			Bucket: aws.String(b.bucket),
+			Key:    aws.String(p),
+			ACL:    aws.String(b.acl),
+			Body:   r,
+		}
+	)
+
+	if b.encryption != "" {
+		in.ServerSideEncryption = aws.String(b.encryption)
+	}
+
+	if _, err := uploader.UploadWithContext(ctx, in); err != nil {
+		return fmt.Errorf("put the object %w", err)
+	}
+
+	return nil
+}
diff --git a/storage/backend/s3/s3_test.go b/storage/backend/s3/s3_test.go
new file mode 100644
index 00000000..2971231b
--- /dev/null
+++ b/storage/backend/s3/s3_test.go
@@ -0,0 +1,110 @@
+// +build integration
+
+package s3
+
+import (
+	"bytes"
+	"context"
+	"io/ioutil"
+	"os"
+	"strings"
+	"testing"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/s3"
+	"github.com/go-kit/kit/log"
+
+	"github.com/meltwater/drone-cache/test"
+)
+
+const (
+	defaultEndpoint        = "127.0.0.1:9000"
+	defaultAccessKey       = "AKIAIOSFODNN7EXAMPLE"
+	defaultSecretAccessKey = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
+	defaultRegion          = "eu-west-1"
+	defaultACL             = "private"
+)
+
+var (
+	endpoint        = getEnv("TEST_S3_ENDPOINT", defaultEndpoint)
+	accessKey       = getEnv("TEST_S3_ACCESS_KEY", defaultAccessKey)
+	secretAccessKey = getEnv("TEST_S3_SECRET_KEY", defaultSecretAccessKey)
+	acl             = getEnv("TEST_S3_ACL", defaultACL)
+)
+
+func TestRoundTrip(t *testing.T) {
+	t.Parallel()
+
+	backend, cleanUp := setup(t)
+	t.Cleanup(cleanUp)
+
+	content := "Hello world4"
+
+	// Test Put
+	test.Ok(t, backend.Put(context.TODO(), "test.t", strings.NewReader(content)))
+
+	// Test Get
+	var buf bytes.Buffer
+	test.Ok(t, backend.Get(context.TODO(), "test.t", &buf))
+
+	b, err := ioutil.ReadAll(&buf)
+	test.Ok(t, err)
+
+	test.Equals(t, []byte(content), b)
+}
+
+// Helpers
+
+func setup(t *testing.T) (*Backend, func()) {
+	client := newClient()
+	bucket := "s3-round-trip"
+
+	_, err := client.CreateBucketWithContext(context.Background(), &s3.CreateBucketInput{
+		Bucket: aws.String(bucket),
+	})
+	test.Ok(t, err)
+
+	b, err := New(
+		log.NewNopLogger(),
+		Config{
+			ACL:       acl,
+			Bucket:    bucket,
+			Endpoint:  endpoint,
+			Key:       accessKey,
+			PathStyle: true, // Should be true for minio and false for AWS.
+			Region:    defaultRegion,
+			Secret:    secretAccessKey,
+		},
+		false,
+	)
+	test.Ok(t, err)
+
+	return b, func() {
+		_, _ = client.DeleteBucket(&s3.DeleteBucketInput{
+			Bucket: aws.String(bucket),
+		})
+	}
+}
+
+func newClient() *s3.S3 {
+	conf := &aws.Config{
+		Region:           aws.String(defaultRegion),
+		Endpoint:         aws.String(endpoint),
+		DisableSSL:       aws.Bool(!strings.HasPrefix(endpoint, "https://")),
+		S3ForcePathStyle: aws.Bool(true),
+		Credentials:      credentials.NewStaticCredentials(accessKey, secretAccessKey, ""),
+	}
+
+	return s3.New(session.Must(session.NewSessionWithOptions(session.Options{})), conf)
+}
+
+func getEnv(key, defaultVal string) string {
+	value, ok := os.LookupEnv(key)
+	if !ok {
+		return defaultVal
+	}
+
+	return value
+}
diff --git a/storage/backend/sftp/config.go b/storage/backend/sftp/config.go
new file mode 100644
index 00000000..6d1fdd31
--- /dev/null
+++ b/storage/backend/sftp/config.go
@@ -0,0 +1,28 @@
+package sftp
+
+import "time"
+
+// SSHAuthMethod describes the type of authentication method.
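+// The supported values are SSHAuthMethodPassword ("PASSWORD") and
+// SSHAuthMethodPublicKeyFile ("PUBLIC_KEY_FILE"); see the constants below.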
+type SSHAuthMethod string
+
+const (
+	// SSHAuthMethodPassword uses password authentication.
+	SSHAuthMethodPassword SSHAuthMethod = "PASSWORD"
+	// SSHAuthMethodPublicKeyFile authenticates with a private key read from a file.
+	SSHAuthMethodPublicKeyFile SSHAuthMethod = "PUBLIC_KEY_FILE"
+)
+
+// SSHAuth is a structure to store authentication information for SSH connection.
+type SSHAuth struct {
+	Password      string
+	PublicKeyFile string
+	Method        SSHAuthMethod
+}
+
+// Config is a structure to store SFTP backend configuration.
+type Config struct {
+	CacheRoot string
+	Username  string
+	Host      string
+	Port      string
+	Auth      SSHAuth
+	Timeout   time.Duration
+}
diff --git a/storage/backend/sftp/sftp.go b/storage/backend/sftp/sftp.go
new file mode 100644
index 00000000..44cbad49
--- /dev/null
+++ b/storage/backend/sftp/sftp.go
@@ -0,0 +1,161 @@
+package sftp
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"path/filepath"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"github.com/pkg/sftp"
+	"golang.org/x/crypto/ssh"
+
+	"github.com/meltwater/drone-cache/internal"
+)
+
+// Backend is an SFTP implementation of the storage backend.
+type Backend struct {
+	logger log.Logger
+
+	cacheRoot string
+	client    *sftp.Client
+}
+
+// New creates a new SFTP backend.
+func New(l log.Logger, c Config) (*Backend, error) {
+	authMethod, err := authMethod(c)
+	if err != nil {
+		return nil, fmt.Errorf("unable to get ssh auth method %w", err)
+	}
+
+	/* #nosec */
+	sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", c.Host, c.Port), &ssh.ClientConfig{
+		User:            c.Username,
+		Auth:            authMethod,
+		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // #nosec TODO(kakkoyun) just a workaround for now, will fix
+		Timeout:         c.Timeout,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("unable to connect to ssh %w", err)
+	}
+
+	client, err := sftp.NewClient(sshClient)
+	if err != nil {
+		sshClient.Close()
+		return nil, fmt.Errorf("unable to connect to ssh with sftp protocol %w", err)
+	}
+
+	//nolint: TODO(kakkoyun): Should it be created?
+	if _, err := client.Stat(c.CacheRoot); err != nil {
+		return nil, fmt.Errorf("make sure cache root <%s> created, %w", c.CacheRoot, err)
+	}
+
+	level.Debug(l).Log("msg", "sftp backend", "config", fmt.Sprintf("%#v", c))
+
+	return &Backend{logger: l, client: client, cacheRoot: c.CacheRoot}, nil
+}
+
+// Get writes downloaded content to the given writer.
+func (b *Backend) Get(ctx context.Context, p string, w io.Writer) error {
+	path, err := filepath.Abs(filepath.Clean(filepath.Join(b.cacheRoot, p)))
+	if err != nil {
+		return fmt.Errorf("generate absolute path %w", err)
+	}
+
+	errCh := make(chan error)
+
+	go func() {
+		defer close(errCh)
+
+		rc, err := b.client.Open(path)
+		if err != nil {
+			errCh <- fmt.Errorf("get the object %w", err)
+			return
+		}
+
+		defer internal.CloseWithErrLogf(b.logger, rc, "reader close defer")
+
+		_, err = io.Copy(w, rc)
+		if err != nil {
+			errCh <- fmt.Errorf("copy the object %w", err)
+		}
+	}()
+
+	select {
+	case err := <-errCh:
+		return err
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}
+
+// Put uploads contents of the given reader.
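+// Missing parent directories under the cache root are created with MkdirAll before
+// the file is written, mirroring the filesystem backend.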
+func (b *Backend) Put(ctx context.Context, p string, r io.Reader) error {
+	errCh := make(chan error)
+
+	go func() {
+		defer close(errCh)
+
+		path := filepath.Clean(filepath.Join(b.cacheRoot, p))
+
+		dir := filepath.Dir(path)
+		if err := b.client.MkdirAll(dir); err != nil {
+			errCh <- fmt.Errorf("create directory %w", err)
+			return
+		}
+
+		w, err := b.client.Create(path)
+		if err != nil {
+			errCh <- fmt.Errorf("create cache file %w", err)
+			return
+		}
+
+		defer internal.CloseWithErrLogf(b.logger, w, "writer close defer")
+
+		if _, err := io.Copy(w, r); err != nil {
+			errCh <- fmt.Errorf("write contents of reader to a file %w", err)
+			return
+		}
+
+		if err := w.Close(); err != nil {
+			errCh <- fmt.Errorf("close the object %w", err)
+		}
+	}()
+
+	select {
+	case err := <-errCh:
+		return err
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}
+
+// Helpers
+
+func authMethod(c Config) ([]ssh.AuthMethod, error) {
+	switch c.Auth.Method {
+	case SSHAuthMethodPassword:
+		return []ssh.AuthMethod{ssh.Password(c.Auth.Password)}, nil
+	case SSHAuthMethodPublicKeyFile:
+		pkAuthMethod, err := readPublicKeyFile(c.Auth.PublicKeyFile)
+		return []ssh.AuthMethod{pkAuthMethod}, err
+	default:
+		return nil, errors.New("unknown ssh method (PASSWORD, PUBLIC_KEY_FILE)")
+	}
+}
+
+func readPublicKeyFile(file string) (ssh.AuthMethod, error) {
+	buffer, err := ioutil.ReadFile(file)
+	if err != nil {
+		return nil, fmt.Errorf("unable to read file %w", err)
+	}
+
+	key, err := ssh.ParsePrivateKey(buffer)
+	if err != nil {
+		return nil, fmt.Errorf("unable to parse private key %w", err)
+	}
+
+	return ssh.PublicKeys(key), nil
+}
diff --git a/storage/backend/sftp/sftp_test.go b/storage/backend/sftp/sftp_test.go
new file mode 100644
index 00000000..51f3ad6e
--- /dev/null
+++ b/storage/backend/sftp/sftp_test.go
@@ -0,0 +1,83 @@
+// +build integration
+
+package sftp
+
+import (
+	"bytes"
+	"context"
+	"io/ioutil"
+	"os"
+	"strings"
+	"testing"
+
+	"github.com/meltwater/drone-cache/test"
+
+	"github.com/go-kit/kit/log"
+)
+
+const (
+	defaultSFTPHost  = "127.0.0.1"
+	defaultSFTPPort  = "22"
+	defaultUsername  = "foo"
+	defaultPassword  = "pass"
+	defaultCacheRoot = "/upload"
+)
+
+var (
+	host      = getEnv("TEST_SFTP_HOST", defaultSFTPHost)
+	port      = getEnv("TEST_SFTP_PORT", defaultSFTPPort)
+	username  = getEnv("TEST_SFTP_USERNAME", defaultUsername)
+	password  = getEnv("TEST_SFTP_PASSWORD", defaultPassword)
+	cacheRoot = getEnv("TEST_SFTP_CACHE_ROOT", defaultCacheRoot)
+)
+
+func TestRoundTrip(t *testing.T) {
+	t.Parallel()
+
+	backend, cleanUp := setup(t)
+	t.Cleanup(cleanUp)
+
+	content := "Hello world4"
+
+	// Test Put
+	test.Ok(t, backend.Put(context.TODO(), "test.t", strings.NewReader(content)))
+
+	// Test Get
+	var buf bytes.Buffer
+	test.Ok(t, backend.Get(context.TODO(), "test.t", &buf))
+
+	b, err := ioutil.ReadAll(&buf)
+	test.Ok(t, err)
+
+	test.Equals(t, []byte(content), b)
+}
+
+// Helpers
+
+func setup(t *testing.T) (*Backend, func()) {
+	b, err := New(
+		log.NewNopLogger(),
+		Config{
+			CacheRoot: cacheRoot,
+			Username:  username,
+			Auth: SSHAuth{
+				Password: password,
+				Method:   SSHAuthMethodPassword,
+			},
+			Host: host,
+			Port: port,
+		},
+	)
+	test.Ok(t, err)
+
+	return b, func() {}
+}
+
+func getEnv(key, defaultVal string) string {
+	value, ok := os.LookupEnv(key)
+	if !ok {
+		return defaultVal
+	}
+
+	return value
+}
diff --git a/storage/storage.go b/storage/storage.go
new file mode 100644
index 00000000..dbd6bc14
--- /dev/null
+++ b/storage/storage.go
@@ -0,0 +1,71 @@
+package storage
+
+import (
+	"context"
+	"io"
+	"time"
+
+	"github.com/meltwater/drone-cache/storage/backend"
+
+	"github.com/go-kit/kit/log"
+)
+
+// DefaultOperationTimeout is the default timeout for a single storage operation.
+const DefaultOperationTimeout = 3 * time.Minute
+
+// Storage is a place that files can be written to and read from.
+type Storage interface {
+	// Get writes contents of the given object with given key from remote storage to io.Writer.
+	Get(p string, w io.Writer) error
+
+	// Put writes contents of io.Reader to remote storage at given key location.
+	Put(p string, r io.Reader) error
+
+	// List lists contents of the given directory by given key from remote storage.
+	List(p string) ([]backend.FileEntry, error)
+
+	// Delete deletes the object from remote storage.
+	Delete(p string) error
+}
+
+// Default Storage implementation.
+type storage struct {
+	logger log.Logger
+
+	b       backend.Backend
+	timeout time.Duration
+}
+
+// New creates a new default storage.
+func New(l log.Logger, b backend.Backend, timeout time.Duration) Storage {
+	return &storage{l, b, timeout}
+}
+
+// Get writes contents of the given object with given key from remote storage to io.Writer.
+func (s *storage) Get(p string, w io.Writer) error {
+	ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
+	defer cancel()
+
+	return s.b.Get(ctx, p, w)
+}
+
+// Put writes contents of io.Reader to remote storage at given key location.
+func (s *storage) Put(p string, r io.Reader) error {
+	ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
+	defer cancel()
+
+	return s.b.Put(ctx, p, r)
+}
+
+// List lists contents of the given directory by given key from remote storage.
+func (s *storage) List(p string) ([]backend.FileEntry, error) {
+	// Implement me!
+	// Make sure consumer utilizes context.
+	return []backend.FileEntry{}, nil
+}
+
+// Delete deletes the object from remote storage.
+func (s *storage) Delete(p string) error {
+	// Implement me!
+	// Make sure consumer utilizes context.
+	return nil
+}
From 130e88693173277c22851f0c758f846ec15fdc5c Mon Sep 17 00:00:00 2001
From: Kemal Akkoyun
Date: Thu, 2 Apr 2020 19:13:22 +0200
Subject: [PATCH 02/16] Introduce Key generators

---
 .../generator}/checksum_file_test.txt |   0
 key/generator/hash.go                 |  40 +++++++
 key/generator/hash_test.go            |  17 +++
 key/generator/metadata.go             | 105 ++++++++++++++++++
 key/generator/metadata_test.go        |  81 ++++++++++++++
 key/generator/static.go               |  20 ++++
 key/generator/util.go                 |  21 ++++
 key/key.go                            |  10 ++
 plugin/cachekey/cachekey.go           |  96 ----------------
 plugin/cachekey/cachekey_test.go      |  79 -------------
 10 files changed, 294 insertions(+), 175 deletions(-)
 rename {plugin/cachekey => key/generator}/checksum_file_test.txt (100%)
 create mode 100644 key/generator/hash.go
 create mode 100644 key/generator/hash_test.go
 create mode 100644 key/generator/metadata.go
 create mode 100644 key/generator/metadata_test.go
 create mode 100644 key/generator/static.go
 create mode 100644 key/generator/util.go
 create mode 100644 key/key.go
 delete mode 100644 plugin/cachekey/cachekey.go
 delete mode 100644 plugin/cachekey/cachekey_test.go

diff --git a/plugin/cachekey/checksum_file_test.txt b/key/generator/checksum_file_test.txt
similarity index 100%
rename from plugin/cachekey/checksum_file_test.txt
rename to key/generator/checksum_file_test.txt
diff --git a/key/generator/hash.go b/key/generator/hash.go
new file mode 100644
index 00000000..caab155e
--- /dev/null
+++ b/key/generator/hash.go
@@ -0,0 +1,40 @@
+package generator
+
+import (
+	"fmt"
+	"io"
+	"strings"
+)
+
+// Hash TODO
+type Hash struct {
+	defaultParts []string
+}
+
+// NewHash TODO
+func NewHash(defaultParts ...string) *Hash {
+	return &Hash{defaultParts: defaultParts}
+}
+
+// Generate generates key from given parts or templates as parameter.
+func (h *Hash) Generate(parts ...string) (string, error) {
+	key, err := hash(append(parts, h.defaultParts...)...)
+	if err != nil {
+		return "", fmt.Errorf("generate hash key for mounted paths %w", err)
+	}
+
+	return key, nil
+}
+
+// Check checks if the generator is functional.
+func (h *Hash) Check() error { return nil }
+
+// hash generates a key based on given strings (i.e. filename paths and branch).
+func hash(parts ...string) (string, error) {
+	readers := make([]io.Reader, len(parts))
+	for i, p := range parts {
+		readers[i] = strings.NewReader(p)
+	}
+
+	return readerHasher(readers...)
+}
diff --git a/key/generator/hash_test.go b/key/generator/hash_test.go
new file mode 100644
index 00000000..de264d8a
--- /dev/null
+++ b/key/generator/hash_test.go
@@ -0,0 +1,17 @@
+package generator
+
+import (
+	"testing"
+
+	"github.com/meltwater/drone-cache/test"
+)
+
+func TestGenerateHash(t *testing.T) {
+	t.Parallel()
+
+	actual, err := NewHash().Generate("hash")
+	test.Ok(t, err)
+
+	expected := "0800fc577294c34e0b28ad2839435945"
+	test.Equals(t, actual, expected)
+}
diff --git a/key/generator/metadata.go b/key/generator/metadata.go
new file mode 100644
index 00000000..ed5f2ec2
--- /dev/null
+++ b/key/generator/metadata.go
@@ -0,0 +1,105 @@
+package generator
+
+import (
+	// #nosec
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strconv"
+	"strings"
+	"text/template"
+	"time"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"github.com/meltwater/drone-cache/internal"
+	"github.com/meltwater/drone-cache/internal/metadata"
+)
+
+// Metadata TODO
+type Metadata struct {
+	logger log.Logger
+
+	tmpl    string
+	data    metadata.Metadata
+	funcMap template.FuncMap
+}
+
+// NewMetadata creates a new Metadata key generator.
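+// The returned generator exposes the template helpers "checksum", "epoch",
+// "arch" and "os" to the given cache key template. A usage sketch, where md
+// is a metadata.Metadata value and the template string is illustrative:
+//
+//	g := NewMetadata(logger, `{{ .Repo.Name }}-{{ checksum "go.sum" }}`, md)
+//	key, err := g.Generate()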
+func NewMetadata(logger log.Logger, tmpl string, data metadata.Metadata) *Metadata {
+	return &Metadata{
+		logger: logger,
+		tmpl:   tmpl,
+		data:   data,
+		funcMap: template.FuncMap{
+			"checksum": checksumFunc(logger),
+			"epoch":    func() string { return strconv.FormatInt(time.Now().Unix(), 10) },
+			"arch":     func() string { return runtime.GOARCH },
+			"os":       func() string { return runtime.GOOS },
+		},
+	}
+}
+
+// Generate generates a key from the given template, or falls back to hash.
+func (g *Metadata) Generate(_ ...string) (string, error) {
+	// NOTICE: for now we only consume a single template; this will be changed.
+	level.Info(g.logger).Log("msg", "using provided cache key template")
+
+	if g.tmpl == "" {
+		return "", errors.New("cache key template is empty")
+	}
+
+	t, err := g.parseTemplate()
+	if err != nil {
+		return "", fmt.Errorf("parse, <%s> as cache key template, falling back to default %w", g.tmpl, err)
+	}
+
+	var b strings.Builder
+
+	err = t.Execute(&b, g.data)
+	if err != nil {
+		return "", fmt.Errorf("build, <%s> as cache key, falling back to default %w", g.tmpl, err)
+	}
+
+	return b.String(), nil
+}
+
+// Check checks if template is parsable.
+func (g *Metadata) Check() error {
+	_, err := g.parseTemplate()
+	return err
+}
+
+// Helpers
+
+func (g *Metadata) parseTemplate() (*template.Template, error) {
+	return template.New("cacheKey").Funcs(g.funcMap).Parse(g.tmpl)
+}
+
+func checksumFunc(logger log.Logger) func(string) string {
+	return func(p string) string {
+		path, err := filepath.Abs(filepath.Clean(p))
+		if err != nil {
+			level.Error(logger).Log("msg", "cache key template/checksum could not find file", "err", err)
+			return ""
+		}
+
+		f, err := os.Open(path)
+		if err != nil {
+			level.Error(logger).Log("msg", "cache key template/checksum could not open file", "err", err)
+			return ""
+		}
+
+		defer internal.CloseWithErrLogf(logger, f, "checksum close defer")
+
+		str, err := readerHasher(f)
+		if err != nil {
+			level.Error(logger).Log("msg", "cache key template/checksum could not generate hash", "err", err)
+			return ""
+		}
+
+		return str
+	}
+}
diff --git a/key/generator/metadata_test.go b/key/generator/metadata_test.go
new file mode 100644
index 00000000..71a9d60b
--- /dev/null
+++ b/key/generator/metadata_test.go
@@ -0,0 +1,81 @@
+package generator
+
+import (
+	"testing"
+	"text/template"
+
+	"github.com/go-kit/kit/log"
+	"github.com/meltwater/drone-cache/internal/metadata"
+	"github.com/meltwater/drone-cache/test"
+)
+
+func TestGenerate(t *testing.T) {
+	t.Parallel()
+
+	l := log.NewNopLogger()
+
+	for _, tt := range []struct {
+		given    string
+		expected string
+	}{
+		{`{{ .Repo.Name }}`, "RepoName"},
+		{`{{ checksum "checksum_file_test.txt"}}`, "04a29c732ecbce101c1be44c948a50c6"},
+		{`{{ checksum "../../docs/drone_env_vars.md"}}`, "f8b5b7f96f3ffaa828e4890aab290e59"},
+		{`{{ epoch }}`, "1550563151"},
+		{`{{ arch }}`, "amd64"},
+		{`{{ os }}`, "darwin"},
+	} {
+		tt := tt
+		t.Run(tt.given, func(t *testing.T) {
+			g := Metadata{
+				logger: l,
+				tmpl:   tt.given,
+				data:   metadata.Metadata{Repo: metadata.Repo{Name: "RepoName"}},
+				funcMap: template.FuncMap{
+					"checksum": checksumFunc(l),
+					"epoch":    func() string { return "1550563151" },
+					"arch":     func() string { return "amd64" },
+					"os":       func() string { return "darwin" },
+				},
+			}
+
+			actual, err := g.Generate(tt.given)
+			test.Ok(t, err)
+			test.Equals(t, actual, tt.expected)
+		})
+	}
+}
+
+func TestParseTemplate(t *testing.T) {
+	t.Parallel()
+
+	l := log.NewNopLogger()
+
+	for _, tt := range []struct {
+		given string
+	}{
+		{`{{ .Repo.Name }}`},
+		{`{{ checksum "checksum_file_test.txt"}}`},
+		{`{{ epoch }}`},
+		{`{{ arch }}`},
+		{`{{ os }}`},
+	} {
+		tt := tt
+		t.Run(tt.given, func(t *testing.T) {
+			g := Metadata{
+				logger: l,
+				tmpl:   tt.given,
+				data:   metadata.Metadata{Repo: metadata.Repo{Name: "RepoName"}},
+				funcMap: template.FuncMap{
+					"checksum": checksumFunc(l),
+					"epoch":    func() string { return "1550563151" },
+					"arch":     func() string { return "amd64" },
+					"os":       func() string { return "darwin" },
+				},
+			}
+
+			_, err := g.parseTemplate()
+			test.Ok(t, err)
+		})
+	}
+}
diff --git a/key/generator/static.go b/key/generator/static.go
new file mode 100644
index 00000000..bb5ec395
--- /dev/null
+++ b/key/generator/static.go
@@ -0,0 +1,20 @@
+package generator
+
+import "path/filepath"
+
+// Static is a key generator that joins the given parts into a static, path-like key.
+type Static struct {
+	defaultParts []string
+}
+
+// NewStatic TODO
+func NewStatic(defaultParts ...string) *Static {
+	return &Static{defaultParts: defaultParts}
+}
+
+// Generate generates key from given parts or templates as parameter.
+func (s *Static) Generate(parts ...string) (string, error) {
+	return filepath.Join(append(parts, s.defaultParts...)...), nil
+}
+
+// Check checks if the generator is functional.
+func (s *Static) Check() error { return nil }
diff --git a/key/generator/util.go b/key/generator/util.go
new file mode 100644
index 00000000..42592334
--- /dev/null
+++ b/key/generator/util.go
@@ -0,0 +1,21 @@
+package generator
+
+import (
+	"crypto/md5" // #nosec
+	"fmt"
+	"io"
+)
+
+// readerHasher is a generic MD5 hash generator over the given io.Readers.
+func readerHasher(readers ...io.Reader) (string, error) {
+	// Use go1.14 new hashmap functions.
+	h := md5.New() // #nosec
+
+	for _, r := range readers {
+		if _, err := io.Copy(h, r); err != nil {
+			return "", fmt.Errorf("write reader as hash %w", err)
+		}
+	}
+
+	return fmt.Sprintf("%x", h.Sum(nil)), nil
+}
diff --git a/key/key.go b/key/key.go
new file mode 100644
index 00000000..51872ebc
--- /dev/null
+++ b/key/key.go
@@ -0,0 +1,10 @@
+package key
+
+// Generator defines a key generator.
+type Generator interface {
+	// Generate generates key from given parts or templates as parameter.
+	Generate(parts ...string) (string, error)
+
+	// Check checks if the generator is functional.
+ Check() error +} diff --git a/plugin/cachekey/cachekey.go b/plugin/cachekey/cachekey.go deleted file mode 100644 index 4d6e3286..00000000 --- a/plugin/cachekey/cachekey.go +++ /dev/null @@ -1,96 +0,0 @@ -package cachekey - -import ( - "crypto/md5" // #nosec - "errors" - "fmt" - "io" - "log" - "os" - "path/filepath" - "runtime" - "strconv" - "strings" - "text/template" - "time" - - "github.com/meltwater/drone-cache/metadata" -) - -var funcMap = template.FuncMap{ - "checksum": func(path string) string { - absPath, err := filepath.Abs(filepath.Clean(path)) - if err != nil { - log.Println("cache key template/checksum could not find file") - return "" - } - - f, err := os.Open(absPath) - if err != nil { - log.Println("cache key template/checksum could not open file") - return "" - } - defer f.Close() - - str, err := readerHasher(f) - if err != nil { - log.Println("cache key template/checksum could not generate hash") - return "" - } - return str - }, - "epoch": func() string { return strconv.FormatInt(time.Now().Unix(), 10) }, - "arch": func() string { return runtime.GOARCH }, - "os": func() string { return runtime.GOOS }, -} - -// Generate generates key from given template as parameter or fallbacks hash -func Generate(tmpl, mount string, data metadata.Metadata) (string, error) { - if tmpl == "" { - return "", errors.New("cache key template is empty") - } - - t, err := ParseTemplate(tmpl) - if err != nil { - return "", fmt.Errorf("parse, <%s> as cache key template, falling back to default %w", tmpl, err) - } - - var b strings.Builder - - err = t.Execute(&b, data) - if err != nil { - return "", fmt.Errorf("build, <%s> as cache key, falling back to default %w", tmpl, err) - } - - return filepath.Join(b.String(), mount), nil -} - -// ParseTemplate parses and mounts helper functions to template engine -func ParseTemplate(tmpl string) (*template.Template, error) { - return template.New("cacheKey").Funcs(funcMap).Parse(tmpl) -} - -// Hash generates a key based on given strings (ie. filename paths and branch) -func Hash(parts ...string) (string, error) { - readers := make([]io.Reader, len(parts)) - for i, p := range parts { - readers[i] = strings.NewReader(p) - } - - return readerHasher(readers...) 
-} - -// Helpers - -// readerHasher generic md5 hash generater from io.Readers -func readerHasher(readers ...io.Reader) (string, error) { - h := md5.New() // #nosec - - for _, r := range readers { - if _, err := io.Copy(h, r); err != nil { - return "", fmt.Errorf("write reader as hash %w", err) - } - } - - return fmt.Sprintf("%x", h.Sum(nil)), nil -} diff --git a/plugin/cachekey/cachekey_test.go b/plugin/cachekey/cachekey_test.go deleted file mode 100644 index 09df4f6d..00000000 --- a/plugin/cachekey/cachekey_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package cachekey - -import ( - "testing" - "text/template" - - "github.com/meltwater/drone-cache/metadata" -) - -var mockFuncMap = template.FuncMap{ - "checksum": funcMap["checksum"], - "epoch": func() string { return "1550563151" }, - "arch": func() string { return "amd64" }, - "os": func() string { return "darwin" }, -} - -func init() { - funcMap = mockFuncMap -} - -func TestGenerate(t *testing.T) { - table := []struct { - given string - expected string - }{ - {`{{ .Repo.Name }}`, "RepoName"}, - {`{{ checksum "checksum_file_test.txt"}}`, "04a29c732ecbce101c1be44c948a50c6"}, - {`{{ checksum "../../docs/drone_env_vars.md"}}`, "f8b5b7f96f3ffaa828e4890aab290e59"}, - {`{{ epoch }}`, "1550563151"}, - {`{{ arch }}`, "amd64"}, - {`{{ os }}`, "darwin"}, - } - m := metadata.Metadata{Repo: metadata.Repo{Name: "RepoName"}} - - for _, tt := range table { - t.Run(tt.given, func(t *testing.T) { - actual, err := Generate(tt.given, "", m) - if err != nil { - t.Errorf("generate failed, error: %v\n", err) - } - - if actual != tt.expected { - t.Errorf("generate failed, got: %s, want: %s\n", actual, tt.expected) - } - }) - } -} - -func TestParseTemplate(t *testing.T) { - table := []struct { - given string - }{ - {`{{ .Repo.Name }}`}, - {`{{ checksum "checksum_file_test.txt"}}`}, - {`{{ epoch }}`}, - {`{{ arch }}`}, - {`{{ os }}`}, - } - for _, tt := range table { - t.Run(tt.given, func(t *testing.T) { - _, err := ParseTemplate(tt.given) - if err != nil { - t.Errorf("parser template failed, error: %v\n", err) - } - }) - } -} - -func TestHash(t *testing.T) { - actual, err := Hash("hash") - if err != nil { - t.Errorf("hash failed, error: %v\n", err) - } - - expected := "0800fc577294c34e0b28ad2839435945" - if actual != expected { - t.Errorf("hash failed, got: %s, want: %s\n", actual, expected) - } -} From d9e4de9bf4af0c0e56def32c14e2e257f9b3e660 Mon Sep 17 00:00:00 2001 From: Kemal Akkoyun Date: Thu, 2 Apr 2020 19:15:26 +0200 Subject: [PATCH 03/16] Introduce internal and move packages --- {metadata => internal/metadata}/metadata.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) rename {metadata => internal/metadata}/metadata.go (71%) diff --git a/metadata/metadata.go b/internal/metadata/metadata.go similarity index 71% rename from metadata/metadata.go rename to internal/metadata/metadata.go index 73d475ed..ba38f263 100644 --- a/metadata/metadata.go +++ b/internal/metadata/metadata.go @@ -1,7 +1,7 @@ package metadata type ( - // Repo stores information about repository that is built + // Repo stores information about repository that is built. Repo struct { Avatar string Branch string @@ -13,7 +13,7 @@ type ( Trusted bool } - // Build stores information about current build + // Build stores information about current build. Build struct { Created int64 Deploy string @@ -25,7 +25,7 @@ type ( Status string } - // Commit stores information about current commit + // Commit stores information about current commit. 
Commit struct { Author Author Branch string @@ -36,13 +36,14 @@ type ( Sha string } - // Author stores information about current commit's author + // Author stores information about current commit's author. Author struct { Avatar string Email string Name string } + // Metadata stores information about current pipeline run. Metadata struct { Build Build Commit Commit From 4eff7a7158d09da77c4844c561d20552fe288269 Mon Sep 17 00:00:00 2001 From: Kemal Akkoyun Date: Thu, 2 Apr 2020 19:15:46 +0200 Subject: [PATCH 04/16] Introduce new Archive layer --- archive/archive.go | 52 +++++++ archive/gzip/gzip.go | 49 +++++++ archive/gzip/gzip_test.go | 278 ++++++++++++++++++++++++++++++++++++ archive/option.go | 31 ++++ archive/tar/tar.go | 290 ++++++++++++++++++++++++++++++++++++++ archive/tar/tar_test.go | 282 ++++++++++++++++++++++++++++++++++++ 6 files changed, 982 insertions(+) create mode 100644 archive/archive.go create mode 100644 archive/gzip/gzip.go create mode 100644 archive/gzip/gzip_test.go create mode 100644 archive/option.go create mode 100644 archive/tar/tar.go create mode 100644 archive/tar/tar_test.go diff --git a/archive/archive.go b/archive/archive.go new file mode 100644 index 00000000..800e02af --- /dev/null +++ b/archive/archive.go @@ -0,0 +1,52 @@ +package archive + +import ( + "compress/flate" + "io" + + "github.com/meltwater/drone-cache/archive/gzip" + "github.com/meltwater/drone-cache/archive/tar" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" +) + +const ( + Gzip = "gzip" + Tar = "tar" + + DefaultCompressionLevel = flate.DefaultCompression + DefaultArchiveFormat = Tar +) + +// Archive is an interface that defines exposed behavior of archive formats. +type Archive interface { + // Create writes content of the given source to an archive, returns written bytes. + // Similar to io.WriterTo. + Create(srcs []string, w io.Writer) (int64, error) + + // Extract reads content from the given archive reader and restores it to the destination, returns written bytes. + // Similar to io.ReaderFrom. + Extract(dst string, r io.Reader) (int64, error) +} + +// FromFormat determines which archive to use from given archive format. +func FromFormat(logger log.Logger, format string, opts ...Option) Archive { + options := options{ + compressionLevel: DefaultCompressionLevel, + } + + for _, o := range opts { + o.apply(&options) + } + + switch format { + case Gzip: + return gzip.New(logger, options.skipSymlinks, options.compressionLevel) + case Tar: + return tar.New(logger, options.skipSymlinks) + default: + level.Error(logger).Log("msg", "unknown archive format", "format", format) + return tar.New(logger, options.skipSymlinks) // DefaultArchiveFormat + } +} diff --git a/archive/gzip/gzip.go b/archive/gzip/gzip.go new file mode 100644 index 00000000..ffc7f2da --- /dev/null +++ b/archive/gzip/gzip.go @@ -0,0 +1,49 @@ +package gzip + +import ( + "compress/gzip" + "fmt" + "io" + + "github.com/meltwater/drone-cache/archive/tar" + "github.com/meltwater/drone-cache/internal" + + "github.com/go-kit/kit/log" +) + +// pArchive TODO +type Archive struct { + logger log.Logger + + compressionLevel int + skipSymlinks bool +} + +// New creates an archive that uses the .tar.gz file format. +func New(logger log.Logger, skipSymlinks bool, compressionLevel int) *Archive { + return &Archive{logger, compressionLevel, skipSymlinks} +} + +// Create writes content of the given source to an archive, returns written bytes. 
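+// Compression is layered over the inner tar stream, so the returned count
+// reflects the uncompressed bytes read from the sources, not the size of
+// the gzip output.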
+func (a *Archive) Create(srcs []string, w io.Writer) (int64, error) { + gw, err := gzip.NewWriterLevel(w, a.compressionLevel) + if err != nil { + return 0, fmt.Errorf("create archive writer %w", err) + } + + defer internal.CloseWithErrLogf(a.logger, gw, "gzip writer") + + return tar.New(a.logger, a.skipSymlinks).Create(srcs, gw) +} + +// Extract reads content from the given archive reader and restores it to the destination, returns written bytes. +func (a *Archive) Extract(dst string, r io.Reader) (int64, error) { + gr, err := gzip.NewReader(r) + if err != nil { + return 0, err + } + + defer internal.CloseWithErrLogf(a.logger, gr, "gzip reader") + + return tar.New(a.logger, a.skipSymlinks).Extract(dst, gr) +} diff --git a/archive/gzip/gzip_test.go b/archive/gzip/gzip_test.go new file mode 100644 index 00000000..4a93b2fa --- /dev/null +++ b/archive/gzip/gzip_test.go @@ -0,0 +1,278 @@ +package gzip + +import ( + "compress/flate" + "compress/gzip" + "io" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/go-kit/kit/log" + + "github.com/meltwater/drone-cache/archive/tar" + "github.com/meltwater/drone-cache/test" +) + +var ( + testRoot = "testdata" + testRootMounted = "testdata/mounted" + testRootExtracted = "testdata/extracted" +) + +func TestCreate(t *testing.T) { + test.Ok(t, os.MkdirAll(testRootMounted, 0755)) + test.Ok(t, os.MkdirAll(testRootExtracted, 0755)) + t.Cleanup(func() { os.RemoveAll(testRoot) }) + + for _, tc := range []struct { + name string + tgz *Archive + srcs []string + written int64 + err error + }{ + { + name: "empty mount paths", + tgz: New(log.NewNopLogger(), true, flate.DefaultCompression), + srcs: []string{}, + written: 0, + err: nil, + }, + { + name: "non-existing mount paths", + tgz: New(log.NewNopLogger(), true, flate.DefaultCompression), + srcs: []string{ + "iamnotexists", + "metoo", + }, + written: 0, + err: tar.ErrSourceNotReachable, // os.ErrNotExist || os.ErrPermission + }, + { + name: "existing mount paths", + tgz: New(log.NewNopLogger(), true, flate.DefaultCompression), + srcs: exampleFileTree(t, "gzip_create"), + written: 43, // 3 x tmpfile in dir, 1 tmpfile + err: nil, + }, + { + name: "existing mount paths with symbolic links", + tgz: New(log.NewNopLogger(), false, flate.DefaultCompression), + srcs: exampleFileTreeWithSymlinks(t, "gzip_create_symlink"), + written: 43, + err: nil, + }, + } { + tc := tc // NOTE: https://github.com/golang/go/wiki/CommonMistakes#using-goroutines-on-loop-iterator-variables + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + // Setup + dstDir, dstDirClean := test.CreateTempDir(t, "gzip_create_archives", testRootMounted) + t.Cleanup(dstDirClean) + + extDir, extDirClean := test.CreateTempDir(t, "gzip_create_extracted", testRootExtracted) + t.Cleanup(extDirClean) + + // Run + archivePath := filepath.Join(dstDir, filepath.Clean(tc.name+".tar.gz")) + written, err := create(tc.tgz, tc.srcs, archivePath) + if err != nil { + test.Expected(t, err, tc.err) + return + } + + test.Exists(t, archivePath) + test.Assert(t, written == tc.written, "case %q: written bytes got %d want %v", tc.name, written, tc.written) + + _, err = extract(tc.tgz, archivePath, extDir) + test.Ok(t, err) + test.EqualDirs(t, extDir, testRootMounted, tc.srcs) + }) + } +} + +func TestExtract(t *testing.T) { + test.Ok(t, os.MkdirAll(testRootMounted, 0755)) + test.Ok(t, os.MkdirAll(testRootExtracted, 0755)) + t.Cleanup(func() { os.RemoveAll(testRoot) }) + + // Setup + tgz := New(log.NewNopLogger(), false, flate.DefaultCompression) + + arcDir, 
arcDirClean := test.CreateTempDir(t, "gzip_extract_archive") + t.Cleanup(arcDirClean) + + files := exampleFileTree(t, "gzip_extract") + + archivePath := filepath.Join(arcDir, "test.tar.gz") + _, err := create(tgz, files, archivePath) + test.Ok(t, err) + + filesWithSymlink := exampleFileTreeWithSymlinks(t, "gzip_extract_symlink") + archiveWithSymlinkPath := filepath.Join(arcDir, "test_with_symlink.tar.gz") + _, err = create(tgz, filesWithSymlink, archiveWithSymlinkPath) + test.Ok(t, err) + + emptyArchivePath := filepath.Join(arcDir, "empty_test.tar.gz") + _, err = create(tgz, []string{}, emptyArchivePath) + test.Ok(t, err) + + badArchivePath := filepath.Join(arcDir, "bad_test.tar.gz") + test.Ok(t, ioutil.WriteFile(badArchivePath, []byte("hello\ndrone\n"), 0644)) + + for _, tc := range []struct { + name string + tgz *Archive + archivePath string + srcs []string + written int64 + err error + }{ + { + name: "non-existing archive", + tgz: New(log.NewNopLogger(), true, flate.DefaultCompression), + archivePath: "iamnotexists", + srcs: []string{}, + written: 0, + err: os.ErrNotExist, + }, + { + name: "non-existing root destination", + tgz: New(log.NewNopLogger(), true, flate.DefaultCompression), + archivePath: emptyArchivePath, + srcs: []string{}, + written: 0, + err: nil, + }, + { + name: "empty archive", + tgz: New(log.NewNopLogger(), true, flate.DefaultCompression), + archivePath: emptyArchivePath, + srcs: []string{}, + written: 0, + err: nil, + }, + { + name: "bad archives", + tgz: New(log.NewNopLogger(), true, flate.DefaultCompression), + archivePath: badArchivePath, + srcs: []string{}, + written: 0, + err: gzip.ErrHeader, + }, + { + name: "existing archive", + tgz: New(log.NewNopLogger(), true, flate.DefaultCompression), + archivePath: archivePath, + srcs: files, + written: 43, + err: nil, + }, + { + name: "existing archive with symbolic links", + tgz: New(log.NewNopLogger(), false, flate.DefaultCompression), + archivePath: archiveWithSymlinkPath, + srcs: filesWithSymlink, + written: 43, + err: nil, + }, + } { + tc := tc // NOTE: https://github.com/golang/go/wiki/CommonMistakes#using-goroutines-on-loop-iterator-variables + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + dstDir, dstDirClean := test.CreateTempDir(t, "gzip_extract_"+tc.name, testRootExtracted) + t.Cleanup(dstDirClean) + + written, err := extract(tc.tgz, tc.archivePath, dstDir) + if err != nil { + test.Expected(t, err, tc.err) + return + } + + test.Assert(t, written == tc.written, "case %q: written bytes got %d want %v", tc.name, written, tc.written) + test.EqualDirs(t, dstDir, testRootMounted, tc.srcs) + }) + } +} + +// Helpers + +func create(a *Archive, srcs []string, dst string) (int64, error) { + pr, pw := io.Pipe() + defer pr.Close() + + var written int64 + go func(w *int64) { + defer pw.Close() + + written, err := a.Create(srcs, pw) + if err != nil { + pw.CloseWithError(err) + } + + *w = written + }(&written) + + content, err := ioutil.ReadAll(pr) + if err != nil { + pr.CloseWithError(err) + return 0, err + } + + if err := ioutil.WriteFile(dst, content, 0644); err != nil { + return 0, err + } + + return written, nil +} + +func extract(a *Archive, src string, dst string) (int64, error) { + pr, pw := io.Pipe() + defer pr.Close() + + f, err := os.Open(src) + if err != nil { + return 0, err + } + + go func() { + defer pw.Close() + + _, err = io.Copy(pw, f) + if err != nil { + pw.CloseWithError(err) + } + }() + + return a.Extract(dst, pr) +} + +// Fixtures + +func exampleFileTree(t *testing.T, name string) []string { + 
file, fileClean := test.CreateTempFile(t, name, []byte("hello\ndrone!\n"), testRootMounted) // 13 bytes + t.Cleanup(fileClean) + + dir, dirClean := test.CreateTempFilesInDir(t, name, []byte("hello\ngo!\n"), testRootMounted) // 10 bytes + t.Cleanup(dirClean) + + return []string{file, dir} +} + +func exampleFileTreeWithSymlinks(t *testing.T, name string) []string { + file, fileClean := test.CreateTempFile(t, name, []byte("hello\ndrone!\n"), testRootMounted) // 13 bytes + t.Cleanup(fileClean) + + symlink := filepath.Join(filepath.Dir(file), name+"_symlink.testfile") + test.Ok(t, os.Symlink(file, symlink)) + t.Cleanup(func() { os.Remove(symlink) }) + + dir, dirClean := test.CreateTempFilesInDir(t, name, []byte("hello\ngo!\n"), testRootMounted) // 10 bytes + t.Cleanup(dirClean) + + return []string{file, dir, symlink} +} diff --git a/archive/option.go b/archive/option.go new file mode 100644 index 00000000..b794b697 --- /dev/null +++ b/archive/option.go @@ -0,0 +1,31 @@ +package archive + +type options struct { + compressionLevel int + skipSymlinks bool +} + +// Option overrides behavior of Archive. +type Option interface { + apply(*options) +} + +type optionFunc func(*options) + +func (f optionFunc) apply(o *options) { + f(o) +} + +// WithSkipSymlinks sets skip symlink option. +func WithSkipSymlinks(b bool) Option { + return optionFunc(func(o *options) { + o.skipSymlinks = b + }) +} + +// WithCompressionLevel sets compression level option. +func WithCompressionLevel(i int) Option { + return optionFunc(func(o *options) { + o.compressionLevel = i + }) +} diff --git a/archive/tar/tar.go b/archive/tar/tar.go new file mode 100644 index 00000000..383defda --- /dev/null +++ b/archive/tar/tar.go @@ -0,0 +1,290 @@ +package tar + +import ( + "archive/tar" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/go-kit/kit/log" + "github.com/meltwater/drone-cache/internal" +) + +var ( + // ErrSourceNotReachable TODO + ErrSourceNotReachable = errors.New("source not reachable") + // ErrArchiveNotReadable TODO + ErrArchiveNotReadable = errors.New("archive not readable") +) + +// Archive TODO +type Archive struct { + logger log.Logger + + skipSymlinks bool +} + +// New creates an archive that uses the .tar file format. +func New(logger log.Logger, skipSymlinks bool) *Archive { + return &Archive{logger, skipSymlinks} +} + +// Create writes content of the given source to an archive, returns written bytes. 
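+// Directories are walked recursively; regular files, directories and,
+// unless skipSymlinks is set, symbolic links are archived with names made
+// relative to each source root.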
+func (a *Archive) Create(srcs []string, w io.Writer) (int64, error) { + tw := tar.NewWriter(w) + defer internal.CloseWithErrLogf(a.logger, tw, "tar writer") + + var written int64 + + for _, src := range srcs { + info, err := os.Lstat(src) + if err != nil { + return written, fmt.Errorf("make sure file or directory readable <%s>: %v, %w", src, err, ErrSourceNotReachable) + } + + if info.IsDir() { + if err := filepath.Walk(src, writeToArchive(tw, src, a.skipSymlinks, &written)); err != nil { + return written, fmt.Errorf("walk, add all files to archive %w", err) + } + } else { + if err := writeToArchive(tw, src, a.skipSymlinks, &written)(src, info, nil); err != nil { + return written, fmt.Errorf("add file to archive %w", err) + } + } + } + + return written, nil +} + +//nolint: lll +func writeToArchive(tw *tar.Writer, root string, skipSymlinks bool, written *int64) func(string, os.FileInfo, error) error { + return func(path string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + if fi == nil { + return errors.New("no file info") + } + + // Create header for Regular files and Directories + h, err := tar.FileInfoHeader(fi, fi.Name()) + if err != nil { + return fmt.Errorf("create header for <%s> %w", path, err) + } + + if fi.Mode()&os.ModeSymlink != 0 { // isSymbolic + if skipSymlinks { + return nil + } + + var err error + if h, err = createSymlinkHeader(fi, path); err != nil { + return fmt.Errorf("create header for symbolic link %w", err) + } + } + + name, err := relativeName(root, path) + if err != nil { + return fmt.Errorf("relative name %w", err) + } + + h.Name = name + + if err := tw.WriteHeader(h); err != nil { + return fmt.Errorf("write header for <%s> %w", path, err) + } + + if !fi.Mode().IsRegular() { + return nil + } + + n, err := writeFileToArchive(tw, path) + if err != nil { + return fmt.Errorf("write file to archive %w", err) + } + + *written += n + // Alternatives: + // *written += h.FileInfo().Size() + // *written += fi.Size() + + return nil + } +} + +func relativeName(src, path string) (string, error) { + info, err := os.Lstat(src) + if err != nil { + return "", fmt.Errorf("%s: stat %w", src, err) + } + + name := filepath.Base(path) + + if info.IsDir() { + dir, err := filepath.Rel(filepath.Dir(src), filepath.Dir(path)) + if err != nil { + return "", fmt.Errorf("relative path %q: %q %v", path, dir, err) + } + + name = filepath.Join(filepath.ToSlash(dir), name) + } + + return strings.TrimPrefix(filepath.ToSlash(name), "/"), nil +} + +func createSymlinkHeader(fi os.FileInfo, path string) (*tar.Header, error) { + lnk, err := os.Readlink(path) + if err != nil { + return nil, fmt.Errorf("read link <%s> %w", path, err) + } + + h, err := tar.FileInfoHeader(fi, lnk) + if err != nil { + return nil, fmt.Errorf("create symlink header for <%s> %w", path, err) + } + + return h, nil +} + +func writeFileToArchive(tw io.Writer, path string) (n int64, err error) { + f, err := os.Open(path) + if err != nil { + return 0, fmt.Errorf("open file <%s> %w", path, err) + } + + defer internal.CloseWithErrCapturef(&err, f, "write file to archive <%s>", path) + + written, err := io.Copy(tw, f) + if err != nil { + return written, fmt.Errorf("copy the file <%s> data to the tarball %w", path, err) + } + + return written, nil +} + +// Extract reads content from the given archive reader and restores it to the destination, returns written bytes. 
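+// Entry targets are resolved against dst so that the result mimics a plain
+// tar extraction; entries with an unknown type flag abort the extraction
+// with an error.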
+func (a *Archive) Extract(dst string, r io.Reader) (int64, error) { + var ( + written int64 + tr = tar.NewReader(r) + ) + + for { + h, err := tr.Next() + + switch { + case err == io.EOF: // if no more files are found return + return written, nil + case err != nil: // return any other error + return written, fmt.Errorf("tar reader %v: %w", err, ErrArchiveNotReadable) + case h == nil: // if the header is nil, skip it + continue + } + + var target string + // NOTICE: It's been done like this to be compatible with normal behavior of a tar extract. + switch { + case filepath.Base(dst) == filepath.Dir(h.Name): + target = filepath.Join(filepath.Dir(dst), h.Name) + case filepath.Base(dst) == filepath.Base(h.Name): + target = filepath.Join(filepath.Dir(dst), h.Name) + default: + target = filepath.Join(dst, h.Name) + } + + switch h.Typeflag { + case tar.TypeDir: + if err := extractDir(h, target); err != nil { + return written, err + } + + continue + case tar.TypeReg, tar.TypeRegA, tar.TypeChar, tar.TypeBlock, tar.TypeFifo: + n, err := extractRegular(h, tr, target) + written += n + + if err != nil { + return written, fmt.Errorf("extract regular file %w", err) + } + + continue + case tar.TypeSymlink: + if err := extractSymlink(h, target); err != nil { + return written, fmt.Errorf("extract symbolic link %w", err) + } + + continue + case tar.TypeLink: + if err := extractLink(h, target); err != nil { + return written, fmt.Errorf("extract link %w", err) + } + + continue + case tar.TypeXGlobalHeader: + continue + default: + return written, fmt.Errorf("extract %s, unknown type flag: %c", target, h.Typeflag) + } + } +} + +func extractDir(h *tar.Header, target string) error { + if err := os.MkdirAll(target, os.FileMode(h.Mode)); err != nil { + return fmt.Errorf("create directory <%s> %w", target, err) + } + + return nil +} + +func extractRegular(h *tar.Header, tr io.Reader, target string) (n int64, err error) { + f, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, os.FileMode(h.Mode)) + if err != nil { + return 0, fmt.Errorf("open extracted file for writing <%s> %w", target, err) + } + + defer internal.CloseWithErrCapturef(&err, f, "extract regular <%s>", target) + + written, err := io.Copy(f, tr) + if err != nil { + return written, fmt.Errorf("copy extracted file for writing <%s> %w", target, err) + } + + return written, nil +} + +func extractSymlink(h *tar.Header, target string) error { + if err := unlink(target); err != nil { + return fmt.Errorf("unlink <%s> %w", target, err) + } + + if err := os.Symlink(h.Linkname, target); err != nil { + return fmt.Errorf("create symbolic link <%s> %w", target, err) + } + + return nil +} + +func extractLink(h *tar.Header, target string) error { + if err := unlink(target); err != nil { + return fmt.Errorf("unlink <%s> %w", target, err) + } + + if err := os.Link(h.Linkname, target); err != nil { + return fmt.Errorf("create hard link <%s> %w", h.Linkname, err) + } + + return nil +} + +func unlink(path string) error { + _, err := os.Lstat(path) + if err == nil { + return os.Remove(path) + } + + return nil +} diff --git a/archive/tar/tar_test.go b/archive/tar/tar_test.go new file mode 100644 index 00000000..0a1a3fc5 --- /dev/null +++ b/archive/tar/tar_test.go @@ -0,0 +1,282 @@ +package tar + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" + "testing" + + "github.com/meltwater/drone-cache/test" + + "github.com/go-kit/kit/log" +) + +var ( + testRoot = "testdata" + testRootMounted = "testdata/mounted" + testRootExtracted = "testdata/extracted" +) + +func TestCreate(t 
*testing.T) { + test.Ok(t, os.MkdirAll(testRootMounted, 0755)) + test.Ok(t, os.MkdirAll(testRootExtracted, 0755)) + t.Cleanup(func() { os.RemoveAll(testRoot) }) + + for _, tc := range []struct { + name string + ta *Archive + srcs []string + written int64 + err error + }{ + { + name: "empty mount paths", + ta: New(log.NewNopLogger(), true), + srcs: []string{}, + written: 0, + err: nil, + }, + { + name: "non-existing mount paths", + ta: New(log.NewNopLogger(), true), + srcs: []string{ + "idonotexist", + "metoo", + }, + written: 0, + err: ErrSourceNotReachable, // os.ErrNotExist || os.ErrPermission + }, + { + name: "existing mount paths", + ta: New(log.NewNopLogger(), true), + srcs: exampleFileTree(t, "tar_create"), + written: 43, // 3 x tmpfile in dir, 1 tmpfile + err: nil, + }, + { + name: "existing mount paths with symbolic links", + ta: New(log.NewNopLogger(), false), + srcs: exampleFileTreeWithSymlinks(t, "tar_create_symlink"), + written: 43, + err: nil, + }, + } { + tc := tc // NOTICE: https://github.com/golang/go/wiki/CommonMistakes#using-goroutines-on-loop-iterator-variables + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + // Setup + dstDir, dstDirClean := test.CreateTempDir(t, "tar_create_archives", testRootMounted) + t.Cleanup(dstDirClean) + + extDir, extDirClean := test.CreateTempDir(t, "tar_create_extracted", testRootExtracted) + t.Cleanup(extDirClean) + + // Run + archivePath := filepath.Join(dstDir, filepath.Clean(tc.name+".tar")) + written, err := create(tc.ta, tc.srcs, archivePath) + if err != nil { + test.Expected(t, err, tc.err) + return + } + + // Test + test.Exists(t, archivePath) + test.Assert(t, written == tc.written, "case %q: written bytes got %d want %v", tc.name, written, tc.written) + + _, err = extract(tc.ta, archivePath, extDir) + test.Ok(t, err) + test.EqualDirs(t, extDir, testRootMounted, tc.srcs) + }) + } +} + +func TestExtract(t *testing.T) { + test.Ok(t, os.MkdirAll(testRootMounted, 0755)) + test.Ok(t, os.MkdirAll(testRootExtracted, 0755)) + t.Cleanup(func() { os.RemoveAll(testRoot) }) + + // Setup + ta := New(log.NewNopLogger(), false) + + arcDir, arcDirClean := test.CreateTempDir(t, "tar_extract_archives", testRootMounted) + t.Cleanup(arcDirClean) + + files := exampleFileTree(t, "tar_extract") + + archivePath := filepath.Join(arcDir, "test.tar") + _, err := create(ta, files, archivePath) + test.Ok(t, err) + + filesWithSymlink := exampleFileTreeWithSymlinks(t, "tar_extract_symlink") + archiveWithSymlinkPath := filepath.Join(arcDir, "test_with_symlink.tar") + _, err = create(ta, filesWithSymlink, archiveWithSymlinkPath) + test.Ok(t, err) + + emptyArchivePath := filepath.Join(arcDir, "empty_test.tar") + _, err = create(ta, []string{}, emptyArchivePath) + test.Ok(t, err) + + badArchivePath := filepath.Join(arcDir, "bad_test.tar") + test.Ok(t, ioutil.WriteFile(badArchivePath, []byte("hello\ndrone\n"), 0644)) + + for _, tc := range []struct { + name string + ta *Archive + archivePath string + srcs []string + written int64 + err error + }{ + { + name: "non-existing archive", + ta: ta, + archivePath: "idonotexist", + srcs: []string{}, + written: 0, + err: os.ErrNotExist, + }, + { + name: "non-existing root destination", + ta: ta, + archivePath: emptyArchivePath, + srcs: []string{}, + written: 0, + err: nil, + }, + { + name: "empty archive", + ta: ta, + archivePath: emptyArchivePath, + srcs: []string{}, + written: 0, + err: nil, + }, + { + name: "bad archives", + ta: ta, + archivePath: badArchivePath, + srcs: []string{}, + written: 0, + err: 
ErrArchiveNotReadable, + }, + { + name: "existing archive", + ta: ta, + archivePath: archivePath, + srcs: files, + written: 43, + err: nil, + }, + { + name: "existing archive with symbolic links", + ta: New(log.NewNopLogger(), false), + archivePath: archiveWithSymlinkPath, + srcs: filesWithSymlink, + written: 43, + err: nil, + }, + } { + tc := tc // NOTE: https://github.com/golang/go/wiki/CommonMistakes#using-goroutines-on-loop-iterator-variables + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + // Setup + dstDir, dstDirClean := test.CreateTempDir(t, "tar_extract_"+tc.name, testRootExtracted) + t.Cleanup(dstDirClean) + + // Run + written, err := extract(tc.ta, tc.archivePath, dstDir) + if err != nil { + test.Expected(t, err, tc.err) + return + } + + // Test + test.Assert(t, written == tc.written, "case %q: written bytes got %d want %v", tc.name, written, tc.written) + test.EqualDirs(t, dstDir, testRootMounted, tc.srcs) + }) + } +} + +// Helpers + +func create(a *Archive, srcs []string, dst string) (int64, error) { + pr, pw := io.Pipe() + defer pr.Close() + + var written int64 + go func(w *int64) { + defer pw.Close() + + written, err := a.Create(srcs, pw) + if err != nil { + pw.CloseWithError(err) + } + + *w = written + }(&written) + + content, err := ioutil.ReadAll(pr) + if err != nil { + pr.CloseWithError(err) + return 0, err + } + + if err := ioutil.WriteFile(dst, content, 0644); err != nil { + return 0, err + } + + return written, nil +} + +func extract(a *Archive, src string, dst string) (int64, error) { + pr, pw := io.Pipe() + defer pr.Close() + + f, err := os.Open(src) + if err != nil { + return 0, err + } + + go func() { + defer pw.Close() + + _, err = io.Copy(pw, f) + if err != nil { + pw.CloseWithError(err) + } + }() + + return a.Extract(dst, pr) +} + +// Fixtures + +func exampleFileTree(t *testing.T, name string) []string { + file, fileClean := test.CreateTempFile(t, name, []byte("hello\ndrone!\n"), testRootMounted) // 13 bytes + t.Cleanup(fileClean) + + dir, dirClean := test.CreateTempFilesInDir(t, name, []byte("hello\ngo!\n"), testRootMounted) // 10 bytes + t.Cleanup(dirClean) + + return []string{file, dir} +} + +func exampleFileTreeWithSymlinks(t *testing.T, name string) []string { + file, fileClean := test.CreateTempFile(t, name, []byte("hello\ndrone!\n"), testRootMounted) // 13 bytes + t.Cleanup(fileClean) + + dir, dirClean := test.CreateTempFilesInDir(t, name, []byte("hello\ngo!\n"), testRootMounted) // 10 bytes + t.Cleanup(dirClean) + + symDir, cleanup := test.CreateTempDir(t, name, testRootMounted) + t.Cleanup(cleanup) + + symlink := filepath.Join(symDir, name+"_symlink.testfile") + test.Ok(t, os.Symlink(file, symlink)) + t.Cleanup(func() { os.Remove(symlink) }) + + return []string{file, dir, symDir} +} From 15a725e7aa5f8d6eaed6b57bc86c65e60f15fafa Mon Sep 17 00:00:00 2001 From: Kemal Akkoyun Date: Thu, 2 Apr 2020 19:16:28 +0200 Subject: [PATCH 05/16] Refactor Cache --- cache/cache.go | 361 ++++------------------------------------ cache/cache_test.go | 9 +- cache/flusher.go | 54 ++++++ cache/flusher_test.go | 8 + cache/option.go | 35 ++-- cache/rebuilder.go | 155 +++++++++++++++++ cache/rebuilder_test.go | 8 + cache/restorer.go | 134 +++++++++++++++ cache/restorer_test.go | 8 + cache/util.go | 13 ++ 10 files changed, 428 insertions(+), 357 deletions(-) create mode 100644 cache/flusher.go create mode 100644 cache/flusher_test.go create mode 100644 cache/rebuilder.go create mode 100644 cache/rebuilder_test.go create mode 100644 cache/restorer.go create mode 100644 
cache/restorer_test.go create mode 100644 cache/util.go diff --git a/cache/cache.go b/cache/cache.go index deecd803..9c0db2d2 100644 --- a/cache/cache.go +++ b/cache/cache.go @@ -2,349 +2,56 @@ package cache import ( - "archive/tar" - "compress/gzip" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" + "time" "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" + "github.com/meltwater/drone-cache/archive" + "github.com/meltwater/drone-cache/key" + "github.com/meltwater/drone-cache/storage" ) -// Backend implements operations for caching files -type Backend interface { - Get(string) (io.ReadCloser, error) - Put(string, io.ReadSeeker) error +// Cache defines Cache functionality and stores configuration. +type Cache interface { + Rebuilder + Restorer + Flusher } -// Cache contains configuration for Cache functionality -type Cache struct { - logger log.Logger - - b Backend - opts options +// Rebuilder TODO +type Rebuilder interface { + // Rebuild TODO + Rebuild(srcs []string) error } -// New creates a new cache with given parameters -func New(logger log.Logger, b Backend, opts ...Option) Cache { - options := options{ - archiveFmt: DefaultArchiveFormat, - compressionLevel: DefaultCompressionLevel, - } - - for _, o := range opts { - o.apply(&options) - } - - return Cache{ - logger: log.With(logger, "component", "cache"), - b: b, - opts: options, - } +// Restorer TODO +type Restorer interface { + // Restore TODO + Restore(srcs []string) error } -// Push pushes the archived file to the cache -func (c Cache) Push(src, dst string) error { - // 1. check if source is reachable - src, err := filepath.Abs(filepath.Clean(src)) - if err != nil { - return fmt.Errorf("read source directory %w", err) - } - - level.Info(c.logger).Log("msg", "archiving directory", "src", src) - - // 2. create a temporary file for the archive - if err := os.MkdirAll("/tmp", os.FileMode(0755)); err != nil { - return fmt.Errorf("create tmp directory %w", err) - } - - dir, err := ioutil.TempDir("", "") - if err != nil { - return fmt.Errorf("create tmp folder for archive %w", err) - } - - archivePath := filepath.Join(dir, "archive.tar") - - file, err := os.Create(archivePath) - if err != nil { - return fmt.Errorf("create tarball file <%s> %w", archivePath, err) - } - - tw, twCloser, err := archiveWriter(file, c.opts.archiveFmt, c.opts.compressionLevel) - if err != nil { - return fmt.Errorf("initialize archive writer %w", err) - } - - level.Debug(c.logger).Log("msg", "archive compression level", "level", c.opts.compressionLevel) - - closer := func() { - twCloser() - file.Close() - } - - defer closer() - - // 3. walk through source and add each file - err = filepath.Walk(src, writeToArchive(tw, c.opts.skipSymlinks)) - if err != nil { - return fmt.Errorf("add all files to archive %w", err) - } - - // 4. Close resources before upload - closer() - - // 5. 
upload archive file to server - level.Info(c.logger).Log("msg", "uploading archived directory", "src", src, "dst", dst) - - return c.pushArchive(dst, archivePath) -} - -func (c Cache) pushArchive(dst, archivePath string) error { - f, err := os.Open(archivePath) - if err != nil { - return fmt.Errorf("open archived file to send %w", err) - } - defer f.Close() - - if err := c.b.Put(dst, f); err != nil { - return fmt.Errorf("upload file %w", err) - } - - return nil -} - -// Pull fetches the archived file from the cache and restores to the host machine's file system -func (c Cache) Pull(src, dst string) error { - level.Info(c.logger).Log("msg", "downloading archived directory", "src", src) - // 1. download archive - rc, err := c.b.Get(src) - if err != nil { - return fmt.Errorf("get file from storage backend %w", err) - } - defer rc.Close() - - // 2. extract archive - level.Info(c.logger).Log("msg", "extracting archived directory", "src", src, "dst", dst) - - if err := extractFromArchive(archiveReader(rc, c.opts.archiveFmt)); err != nil { - return fmt.Errorf("extract files from downloaded archive %w", err) - } - - return nil +// Flusher TODO +type Flusher interface { + // Flush TODO + Flush(srcs []string) error } -// Helpers - -func archiveWriter(w io.Writer, f string, l int) (*tar.Writer, func(), error) { - switch f { - case "gzip": - gw, err := gzip.NewWriterLevel(w, l) - if err != nil { - return nil, nil, fmt.Errorf("create archive writer %w", err) - } - - tw := tar.NewWriter(gw) - - return tw, func() { - gw.Close() - tw.Close() - }, nil - default: - tw := tar.NewWriter(w) - return tw, func() { tw.Close() }, nil - } +type cache struct { + Rebuilder + Restorer + Flusher } -func writeToArchive(tw *tar.Writer, skipSymlinks bool) func(path string, fi os.FileInfo, err error) error { - return func(path string, fi os.FileInfo, pErr error) error { - if pErr != nil { - return pErr - } - - var h *tar.Header - // Create header for Regular files and Directories - var err error - h, err = tar.FileInfoHeader(fi, "") - if err != nil { - return fmt.Errorf("create header for <%s> %w", path, err) - } - - if isSymlink(fi) { - if skipSymlinks { - return nil - } +// New creates a new cache with given parameters. 
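+// It composes a Rebuilder, a Restorer and a Flusher that share the given
+// storage, archive and key generators; the flusher is created with a fixed
+// TTL of one hour.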
+func New(logger log.Logger, s storage.Storage, a archive.Archive, g key.Generator, opts ...Option) Cache { + options := options{} - var err error - if h, err = createSymlinkHeader(fi, path); err != nil { - return fmt.Errorf("create header for symbolic link %w", err) - } - } - - h.Name = path // to give absolute path - - if err := tw.WriteHeader(h); err != nil { - return fmt.Errorf("write header for <%s> %w", path, err) - } - - if fi.Mode().IsRegular() { // open and write only if it is a regular file - if err := writeFileToArchive(tw, path); err != nil { - return fmt.Errorf("write file to archive %w", err) - } - } - - return nil - } -} - -func createSymlinkHeader(fi os.FileInfo, path string) (*tar.Header, error) { - lnk, err := os.Readlink(path) - if err != nil { - return nil, fmt.Errorf("read link <%s> %w", path, err) - } - - h, err := tar.FileInfoHeader(fi, lnk) - if err != nil { - return nil, fmt.Errorf("create symlink header for <%s> %w", path, err) - } - - return h, nil -} - -func writeFileToArchive(tw io.Writer, path string) error { - f, err := os.Open(path) - if err != nil { - return fmt.Errorf("open file <%s> %w", path, err) - } - defer f.Close() - - if _, err := io.Copy(tw, f); err != nil { - return fmt.Errorf("copy the file <%s> data to the tarball %w", path, err) - } - - return nil -} - -func archiveReader(r io.Reader, archiveFmt string) *tar.Reader { - tr := tar.NewReader(r) - - switch archiveFmt { - case "gzip": - gzr, err := gzip.NewReader(r) - if err != nil { - gzr.Close() - return tr - } - - return tar.NewReader(gzr) - default: - return tr - } -} - -func extractFromArchive(tr *tar.Reader) error { - for { - h, err := tr.Next() - - switch { - case err == io.EOF: // if no more files are found return - return nil - case err != nil: // return any other error - return fmt.Errorf("tar reader failed %w", err) - case h == nil: // if the header is nil, skip it - continue - } - - switch h.Typeflag { - case tar.TypeDir: - if err := extractDir(h); err != nil { - return err - } - - continue - case tar.TypeReg, tar.TypeRegA, tar.TypeChar, tar.TypeBlock, tar.TypeFifo: - if err := extractRegular(h, tr); err != nil { - return fmt.Errorf("extract regular file %w", err) - } - - continue - case tar.TypeSymlink: - if err := extractSymlink(h); err != nil { - return fmt.Errorf("extract symbolic link %w", err) - } - - continue - case tar.TypeLink: - if err := extractLink(h); err != nil { - return fmt.Errorf("extract link %w", err) - } - - continue - case tar.TypeXGlobalHeader: - continue - default: - return fmt.Errorf("extract %s, unknown type flag: %c", h.Name, h.Typeflag) - } - } -} - -func extractDir(h *tar.Header) error { - if err := os.MkdirAll(h.Name, os.FileMode(h.Mode)); err != nil { - return fmt.Errorf("create directory <%s> %w", h.Name, err) - } - - return nil -} - -func extractRegular(h *tar.Header, tr io.Reader) error { - f, err := os.OpenFile(h.Name, os.O_CREATE|os.O_RDWR, os.FileMode(h.Mode)) - if err != nil { - return fmt.Errorf("open extracted file for writing <%s> %w", h.Name, err) - } - defer f.Close() - - if _, err := io.Copy(f, tr); err != nil { - return fmt.Errorf("copy extracted file for writing <%s> %w", h.Name, err) - } - - return nil -} - -func extractSymlink(h *tar.Header) error { - if err := unlink(h.Name); err != nil { - return fmt.Errorf("unlink <%s> %w", h.Name, err) - } - - if err := os.Symlink(h.Linkname, h.Name); err != nil { - return fmt.Errorf("create symbolic link <%s> %w", h.Name, err) - } - - return nil -} - -func extractLink(h *tar.Header) error { - if err := 
unlink(h.Name); err != nil {
-		return fmt.Errorf("unlink <%s> %w", h.Name, err)
-	}
-
-	if err := os.Link(h.Linkname, h.Name); err != nil {
-		return fmt.Errorf("create hard link <%s> %w", h.Linkname, err)
+	for _, o := range opts {
+		o.apply(&options)
 	}
 
-	return nil
-}
-
-func isSymlink(fi os.FileInfo) bool {
-	return fi.Mode()&os.ModeSymlink != 0
-}
-
-func unlink(path string) error {
-	_, err := os.Lstat(path)
-	if err == nil {
-		return os.Remove(path)
+	return &cache{
+		NewRebuilder(log.With(logger, "component", "rebuilder"), s, a, g, options.fallbackGenerator, options.namespace),
+		NewRestorer(log.With(logger, "component", "restorer"), s, a, g, options.fallbackGenerator, options.namespace),
+		NewFlusher(log.With(logger, "component", "flusher"), s, time.Hour),
+	}
-
-	return nil
 }
diff --git a/cache/cache_test.go b/cache/cache_test.go
index 0e4fd84f..11b8e410 100644
--- a/cache/cache_test.go
+++ b/cache/cache_test.go
@@ -1,9 +1,8 @@
 package cache
 
-import (
-	"testing"
-)
+import "testing"
 
-func TestCacheTruth(t *testing.T) {
-	t.Skip("skipping cache package tests")
+func TestCache(t *testing.T) {
+	// Implement me!
+	t.Skip("skipping unimplemented test.")
 }
diff --git a/cache/flusher.go b/cache/flusher.go
new file mode 100644
index 00000000..79875984
--- /dev/null
+++ b/cache/flusher.go
@@ -0,0 +1,54 @@
+package cache
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/meltwater/drone-cache/storage"
+	"github.com/meltwater/drone-cache/storage/backend"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+)
+
+type flusher struct {
+	logger log.Logger
+
+	store storage.Storage
+	dirty func(backend.FileEntry) bool
+}
+
+// NewFlusher creates a new cache flusher.
+func NewFlusher(logger log.Logger, s storage.Storage, ttl time.Duration) Flusher {
+	return flusher{logger: logger, store: s, dirty: IsExpired(ttl)}
+}
+
+// Flush cleans the expired files from the cache.
+func (f flusher) Flush(srcs []string) error {
+	for _, src := range srcs {
+		level.Info(f.logger).Log("msg", "cleaning expired files", "src", src)
+
+		files, err := f.store.List(src)
+		if err != nil {
+			return fmt.Errorf("flusher list %w", err)
+		}
+
+		for _, file := range files {
+			if f.dirty(file) {
+				err := f.store.Delete(file.Path)
+				if err != nil {
+					return fmt.Errorf("flusher delete %w", err)
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
+// IsExpired creates a function that reports whether a file has outlived the given TTL.
+func IsExpired(ttl time.Duration) func(file backend.FileEntry) bool {
+	return func(file backend.FileEntry) bool {
+		return time.Now().After(file.LastModified.Add(ttl))
+	}
+}
diff --git a/cache/flusher_test.go b/cache/flusher_test.go
new file mode 100644
index 00000000..b7192fd5
--- /dev/null
+++ b/cache/flusher_test.go
@@ -0,0 +1,8 @@
+package cache
+
+import "testing"
+
+func TestFlush(t *testing.T) {
+	// Implement me!
+	t.Skip("skipping unimplemented test.")
+}
diff --git a/cache/option.go b/cache/option.go
index 89a9a783..f53b2028 100644
--- a/cache/option.go
+++ b/cache/option.go
@@ -1,21 +1,13 @@
 package cache
 
-import (
-	"compress/flate"
-)
-
-const (
-	DefaultCompressionLevel = flate.DefaultCompression
-	DefaultArchiveFormat    = "tar"
-)
+import "github.com/meltwater/drone-cache/key"
 
 type options struct {
-	archiveFmt       string
-	compressionLevel int
-	skipSymlinks     bool
+	namespace         string
+	fallbackGenerator key.Generator
 }
 
 // Option overrides behavior of Cache.
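+// Options are applied once, by New, while the cache components are composed.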
type Option interface { apply(*options) } @@ -26,23 +18,16 @@ func (f optionFunc) apply(o *options) { f(o) } -// WithSkipSymlinks sets skip symlink option. -func WithSkipSymlinks(b bool) Option { - return optionFunc(func(o *options) { - o.skipSymlinks = b - }) -} - -// WithArchiveFormat sets archive format option. -func WithArchiveFormat(s string) Option { +// WithNamespace sets namespace option. +func WithNamespace(s string) Option { return optionFunc(func(o *options) { - o.archiveFmt = s + o.namespace = s }) } -// WithCompressionLevel sets compression level option. -func WithCompressionLevel(i int) Option { +// WithFallbackGenerator sets fallback key generator option. +func WithFallbackGenerator(g key.Generator) Option { return optionFunc(func(o *options) { - o.compressionLevel = i + o.fallbackGenerator = g }) } diff --git a/cache/rebuilder.go b/cache/rebuilder.go new file mode 100644 index 00000000..2e8d03f9 --- /dev/null +++ b/cache/rebuilder.go @@ -0,0 +1,155 @@ +package cache + +import ( + "fmt" + "io" + "os" + "path/filepath" + "sync" + "time" + + "github.com/meltwater/drone-cache/archive" + "github.com/meltwater/drone-cache/internal" + "github.com/meltwater/drone-cache/key" + "github.com/meltwater/drone-cache/storage" + + "github.com/dustin/go-humanize" + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" +) + +type rebuilder struct { + logger log.Logger + + a archive.Archive + s storage.Storage + g key.Generator + fg key.Generator + + namespace string +} + +// NewRebuilder TODO +func NewRebuilder(logger log.Logger, s storage.Storage, a archive.Archive, g key.Generator, fg key.Generator, namespace string) Rebuilder { //nolint:lll + return rebuilder{logger, a, s, g, fg, namespace} +} + +// Rebuild TODO +func (r rebuilder) Rebuild(srcs []string) error { + level.Info(r.logger).Log("msg", "rebuilding cache") + + now := time.Now() + + key, err := r.generateKey() + if err != nil { + return fmt.Errorf("generate key %w", err) + } + + var ( + wg sync.WaitGroup + errs = &internal.MultiError{} + ) + + for _, src := range srcs { + if _, err := os.Lstat(src); err != nil { + return fmt.Errorf("source <%s>, make sure file or directory exists and readable %w", src, err) + } + + dst := filepath.Join(r.namespace, key, src) + + level.Info(r.logger).Log("msg", "rebuilding cache for directory", "local", src, "remote", dst) + + wg.Add(1) //nolint:gomnd + + go func(dst, src string) { + defer wg.Done() + + if err := r.rebuild(src, dst); err != nil { + errs.Add(fmt.Errorf("upload from <%s> to <%s> %w", src, dst, err)) + } + }(dst, src) + } + + wg.Wait() + + if errs.Err() != nil { + return fmt.Errorf("rebuild failed %w", errs) + } + + level.Info(r.logger).Log("msg", "cache built", "took", time.Since(now)) + + return nil +} + +// rebuild pushes the archived file to the cache. 
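+// The archive is streamed through an io.Pipe: a goroutine writes the archive
+// into the pipe while storage.Put consumes it, so no intermediate file is
+// staged on local disk.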
+func (r rebuilder) rebuild(src, dst string) (err error) { + src, err = filepath.Abs(filepath.Clean(src)) + if err != nil { + return fmt.Errorf("clean source path %w", err) + } + + pr, pw := io.Pipe() + defer internal.CloseWithErrCapturef(&err, pr, "rebuild, pr close <%s>", src) + + var written int64 + + go func(wrt *int64) { + defer internal.CloseWithErrLogf(r.logger, pw, "pw close defer") + + level.Info(r.logger).Log("msg", "archiving directory", "src", src) + + written, err := r.a.Create([]string{src}, pw) + if err != nil { + if err := pw.CloseWithError(fmt.Errorf("archive write, pipe writer failed %w", err)); err != nil { + level.Error(r.logger).Log("msg", "pw close", "err", err) + } + } + + *wrt += written + }(&written) + + level.Info(r.logger).Log("msg", "uploading archived directory", "local", src, "remote", dst) + + sw := &statWriter{} + tr := io.TeeReader(pr, sw) + + if err := r.s.Put(dst, tr); err != nil { + err = fmt.Errorf("upload file, pipe reader failed %w", err) + if err := pr.CloseWithError(err); err != nil { + level.Error(r.logger).Log("msg", "pr close", "err", err) + } + + return err + } + + level.Debug(r.logger).Log( + "msg", "archive created", + "local", src, + "remote", dst, + "archived bytes", humanize.Bytes(uint64(sw.written)), + "read bytes", humanize.Bytes(uint64(written)), + "ratio", fmt.Sprintf("%%%0.2f", float64(sw.written)/float64(written)*100.0), //nolint:gomnd + ) + + return nil +} + +// Helpers + +func (r rebuilder) generateKey(parts ...string) (string, error) { + key, err := r.g.Generate(parts...) + if err == nil { + return key, nil + } + + if r.fg != nil { + level.Error(r.logger).Log("msg", "falling back to fallback key generator", "err", err) + + key, err = r.fg.Generate(parts...) + if err == nil { + return key, nil + } + } + + return "", err +} diff --git a/cache/rebuilder_test.go b/cache/rebuilder_test.go new file mode 100644 index 00000000..c827f249 --- /dev/null +++ b/cache/rebuilder_test.go @@ -0,0 +1,8 @@ +package cache + +import "testing" + +func TestRebuild(t *testing.T) { + // Implement me! 
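+	// A possible starting point (hypothetical outline): create a small
+	// temporary directory tree, run rebuilder.Rebuild against a fake
+	// storage.Storage, and assert that Put receives the
+	// <namespace>/<key>/<src> destination path that Rebuild constructs.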
+	t.Skip("skipping unimplemented test.")
+}
diff --git a/cache/restorer.go b/cache/restorer.go
new file mode 100644
index 00000000..60324493
--- /dev/null
+++ b/cache/restorer.go
@@ -0,0 +1,134 @@
+package cache
+
+import (
+	"fmt"
+	"io"
+	"path/filepath"
+	"sync"
+	"time"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"github.com/meltwater/drone-cache/archive"
+	"github.com/meltwater/drone-cache/internal"
+	"github.com/meltwater/drone-cache/key"
+	"github.com/meltwater/drone-cache/storage"
+)
+
+type restorer struct {
+	logger log.Logger
+
+	a  archive.Archive
+	s  storage.Storage
+	g  key.Generator
+	fg key.Generator
+
+	namespace string
+}
+
+// NewRestorer creates a new cache restorer.
+func NewRestorer(logger log.Logger, s storage.Storage, a archive.Archive, g key.Generator, fg key.Generator, namespace string) Restorer { //nolint:lll
+	return restorer{logger, a, s, g, fg, namespace}
+}
+
+// Restore restores the given directories from the remote cache.
+func (r restorer) Restore(dsts []string) error {
+	level.Info(r.logger).Log("msg", "restoring cache")
+
+	now := time.Now()
+
+	key, err := r.generateKey()
+	if err != nil {
+		return fmt.Errorf("generate key %w", err)
+	}
+
+	var (
+		wg   sync.WaitGroup
+		errs = &internal.MultiError{}
+	)
+
+	for _, dst := range dsts {
+		src := filepath.Join(r.namespace, key, dst)
+
+		level.Info(r.logger).Log("msg", "restoring directory", "local", dst, "remote", src)
+
+		wg.Add(1) //nolint:gomnd
+
+		go func(src, dst string) {
+			defer wg.Done()
+
+			if err := r.restore(src, dst); err != nil {
+				errs.Add(fmt.Errorf("download from <%s> to <%s> %w", src, dst, err))
+			}
+		}(src, dst)
+	}
+
+	wg.Wait()
+
+	if errs.Err() != nil {
+		return fmt.Errorf("restore failed %w", errs)
+	}
+
+	level.Info(r.logger).Log("msg", "cache restored", "took", time.Since(now))
+
+	return nil
+}
+
+// restore fetches the archived file from the cache and restores it to the host machine's file system.
+func (r restorer) restore(src, dst string) (err error) {
+	pr, pw := io.Pipe()
+	defer internal.CloseWithErrCapturef(&err, pr, "restore, pr close <%s>", dst)
+
+	go func() {
+		defer internal.CloseWithErrLogf(r.logger, pw, "pw close defer")
+
+		level.Info(r.logger).Log("msg", "downloading archived directory", "remote", src, "local", dst)
+
+		if err := r.s.Get(src, pw); err != nil {
+			if err := pw.CloseWithError(fmt.Errorf("get file from storage backend, pipe writer failed %w", err)); err != nil {
+				level.Error(r.logger).Log("msg", "pw close", "err", err)
+			}
+		}
+	}()
+
+	level.Info(r.logger).Log("msg", "extracting archived directory", "remote", src, "local", dst)
+
+	written, err := r.a.Extract(dst, pr)
+	if err != nil {
+		err = fmt.Errorf("extract files from downloaded archive, pipe reader failed %w", err)
+		if err := pr.CloseWithError(err); err != nil {
+			level.Error(r.logger).Log("msg", "pr close", "err", err)
+		}
+
+		return err
+	}
+
+	level.Debug(r.logger).Log(
+		"msg", "archive extracted",
+		"local", dst,
+		"remote", src,
+		"raw size", written,
+	)
+
+	return nil
+}
+
+// Helpers
+
+func (r restorer) generateKey(parts ...string) (string, error) {
+	key, err := r.g.Generate(parts...)
+	if err == nil {
+		return key, nil
+	}
+
+	if r.fg != nil {
+		level.Error(r.logger).Log("msg", "falling back to fallback key generator", "err", err)
+
+		key, err = r.fg.Generate(parts...)
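+		// Note that err is reassigned above, so if the fallback generator
+		// also fails, the fallback's error (not the primary generator's)
+		// is the one returned below.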
+ if err == nil { + return key, nil + } + } + + return "", err +} diff --git a/cache/restorer_test.go b/cache/restorer_test.go new file mode 100644 index 00000000..8deaf96d --- /dev/null +++ b/cache/restorer_test.go @@ -0,0 +1,8 @@ +package cache + +import "testing" + +func TestRestore(t *testing.T) { + // Implement me! + t.Skip("skipping unimplemented test.") +} diff --git a/cache/util.go b/cache/util.go new file mode 100644 index 00000000..064b4052 --- /dev/null +++ b/cache/util.go @@ -0,0 +1,13 @@ +package cache + +// statWriter implements io.Writer and keeps track of the written bytes. +type statWriter struct { + written int64 +} + +func (s *statWriter) Write(p []byte) (n int, err error) { + size := len(p) + s.written += int64(size) + + return size, nil +} From 31ba5d0c6fbce8f8d04382ab8acf5c73f1e92f79 Mon Sep 17 00:00:00 2001 From: Kemal Akkoyun Date: Thu, 2 Apr 2020 19:17:53 +0200 Subject: [PATCH 06/16] Refactor plugin layer and integration tests --- internal/errors.go | 62 +++ internal/io.go | 62 +++ internal/plugin/config.go | 37 ++ internal/plugin/plugin.go | 123 +++++ internal/plugin/plugin_test.go | 450 +++++++++++++++++ main.go | 895 +++++++++++++++++---------------- plugin/plugin.go | 208 -------- plugin/plugin_test.go | 598 ---------------------- test/assert.go | 224 +++++++++ test/helpers.go | 81 +++ 10 files changed, 1513 insertions(+), 1227 deletions(-) create mode 100644 internal/errors.go create mode 100644 internal/io.go create mode 100644 internal/plugin/config.go create mode 100644 internal/plugin/plugin.go create mode 100644 internal/plugin/plugin_test.go delete mode 100644 plugin/plugin.go delete mode 100644 plugin/plugin_test.go create mode 100644 test/assert.go create mode 100644 test/helpers.go diff --git a/internal/errors.go b/internal/errors.go new file mode 100644 index 00000000..1d44ade6 --- /dev/null +++ b/internal/errors.go @@ -0,0 +1,62 @@ +package internal + +import ( + "bytes" + "fmt" + "sync" +) + +// NOTICE: Modified version of https://github.com/prometheus/prometheus/blob/master/tsdb/errors/errors.go + +// The MultiError type implements the error interface, and contains the +// Errors used to construct it. +type MultiError struct { + mu sync.Mutex + errs []error +} + +// Returns a concatenated string of the contained errors +func (me *MultiError) Error() string { + var buf bytes.Buffer + + me.mu.Lock() + defer me.mu.Unlock() + + if len(me.errs) > 1 { //nolint:gomnd + fmt.Fprintf(&buf, "%d errors: ", len(me.errs)) + } + + for i, err := range me.errs { + if i != 0 { + buf.WriteString(";\n") + } + + buf.WriteString(err.Error()) + } + + return buf.String() +} + +// Add adds the error to the error list if it is not nil. +func (me *MultiError) Add(err error) { + if err == nil { + return + } + + me.mu.Lock() + defer me.mu.Unlock() + + me.errs = append(me.errs, err) +} + +// Err returns the error list as an error or nil if it is empty. +func (me *MultiError) Err() error { + me.mu.Lock() + defer me.mu.Unlock() + + if len(me.errs) == 0 { + return nil + } + + return me +} diff --git a/internal/io.go b/internal/io.go new file mode 100644 index 00000000..62f3e840 --- /dev/null +++ b/internal/io.go @@ -0,0 +1,62 @@ +package internal + +import ( + "errors" + "fmt" + "io" + "os" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" +) + +// CloseWithErrLogf is making sure we log every error, even those from best effort tiny closers. 
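+//
+// Typical use is in a defer, as elsewhere in this patch:
+//
+//   defer internal.CloseWithErrLogf(r.logger, pw, "pw close defer")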
+func CloseWithErrLogf(logger log.Logger, closer io.Closer, format string, a ...interface{}) {
+	err := close(closer)
+	if err == nil {
+		return
+	}
+
+	if logger == nil {
+		logger = log.NewLogfmtLogger(os.Stderr)
+	}
+
+	level.Warn(logger).Log("msg", "detected close error", "err", fmt.Errorf(format+" %w", append(a, err)...))
+}
+
+// CloseWithErrCapturef runs the closer and, on a close error, returns it through
+// the given error pointer, combined with any error already stored there.
+func CloseWithErrCapturef(err *error, closer io.Closer, format string, a ...interface{}) {
+	if err != nil {
+		cErr := close(closer)
+		if cErr == nil {
+			return
+		}
+
+		mErr := MultiError{}
+		mErr.Add(*err)
+		mErr.Add(fmt.Errorf(format+" %w", append(a, cErr)...))
+		*err = mErr.Err()
+
+		return
+	}
+
+	cErr := close(closer)
+	if cErr == nil {
+		return
+	}
+
+	*err = cErr
+}
+
+func close(closer io.Closer) error {
+	err := closer.Close()
+	if err == nil {
+		return nil
+	}
+
+	if errors.Is(err, os.ErrClosed) {
+		return nil
+	}
+
+	return err
+}
diff --git a/internal/plugin/config.go b/internal/plugin/config.go
new file mode 100644
index 00000000..297b0999
--- /dev/null
+++ b/internal/plugin/config.go
@@ -0,0 +1,37 @@
+package plugin
+
+import (
+	"time"
+
+	"github.com/meltwater/drone-cache/storage/backend/azure"
+	"github.com/meltwater/drone-cache/storage/backend/filesystem"
+	"github.com/meltwater/drone-cache/storage/backend/gcs"
+	"github.com/meltwater/drone-cache/storage/backend/s3"
+	"github.com/meltwater/drone-cache/storage/backend/sftp"
+)
+
+// Config holds plugin-specific parameters and secrets.
+type Config struct {
+	ArchiveFormat    string
+	Backend          string
+	CacheKeyTemplate string
+
+	// Modes
+	Debug   bool
+	Rebuild bool
+	Restore bool
+
+	// Optional
+	SkipSymlinks            bool
+	CompressionLevel        int
+	StorageOperationTimeout time.Duration
+
+	Mount []string
+
+	// Backend
+	S3         s3.Config
+	FileSystem filesystem.Config
+	SFTP       sftp.Config
+	Azure      azure.Config
+	GCS        gcs.Config
+}
diff --git a/internal/plugin/plugin.go b/internal/plugin/plugin.go
new file mode 100644
index 00000000..a3b438e0
--- /dev/null
+++ b/internal/plugin/plugin.go
@@ -0,0 +1,123 @@
+// Package plugin implements caching of directories using the given backends.
+package plugin
+
+import (
+	"errors"
+	"fmt"
+	"os"
+
+	"github.com/meltwater/drone-cache/archive"
+	"github.com/meltwater/drone-cache/cache"
+	"github.com/meltwater/drone-cache/internal/metadata"
+	"github.com/meltwater/drone-cache/key"
+	keygen "github.com/meltwater/drone-cache/key/generator"
+	"github.com/meltwater/drone-cache/storage"
+	"github.com/meltwater/drone-cache/storage/backend"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+)
+
+// Error is a recognized error from the plugin.
+type Error string
+
+// Error implements the error interface.
+func (e Error) Error() string { return string(e) }
+
+// Unwrap implements the error unwrapping interface.
+func (e Error) Unwrap() error { return e }
+
+// Plugin stores metadata about the current plugin.
+type Plugin struct {
+	logger log.Logger
+
+	Metadata metadata.Metadata
+	Config   Config
+}
+
+// New creates a new Plugin.
+func New(logger log.Logger) *Plugin {
+	return &Plugin{logger: logger}
+}
+
+// Exec is the entry point of Plugin, where the magic happens.
+func (p *Plugin) Exec() error {
+	cfg := p.Config
+
+	// 1. Check parameters
+	if cfg.Debug {
+		level.Debug(p.logger).Log("msg", "DEBUG MODE enabled!")
+
+		for _, pair := range os.Environ() {
+			level.Debug(p.logger).Log("var", pair)
+		}
+
+		level.Debug(p.logger).Log("msg", "plugin initialized with config", "config", fmt.Sprintf("%#v", p.Config))
+		level.Debug(p.logger).Log("msg", "plugin initialized with metadata", "metadata", fmt.Sprintf("%#v", p.Metadata))
+	}
+
+	// FLUSH
+
+	if cfg.Rebuild && cfg.Restore {
+		return errors.New("rebuild and restore are mutually exclusive, please set only one of them")
+	}
+
+	var options []cache.Option
+	options = append(options, cache.WithNamespace(p.Metadata.Repo.Name))
+
+	var generator key.Generator
+	if cfg.CacheKeyTemplate != "" {
+		generator = keygen.NewMetadata(p.logger, cfg.CacheKeyTemplate, p.Metadata)
+		if err := generator.Check(); err != nil {
+			return fmt.Errorf("parse cache key template %w", err)
+		}
+
+		options = append(options, cache.WithFallbackGenerator(keygen.NewHash(p.Metadata.Commit.Branch)))
+	} else {
+		generator = keygen.NewHash(p.Metadata.Commit.Branch)
+		options = append(options, cache.WithFallbackGenerator(keygen.NewStatic(p.Metadata.Commit.Branch)))
+	}
+
+	// 2. Initialize storage backend.
+	b, err := backend.FromConfig(p.logger, cfg.Backend, backend.Config{
+		Debug:      cfg.Debug,
+		Azure:      cfg.Azure,
+		FileSystem: cfg.FileSystem,
+		GCS:        cfg.GCS,
+		S3:         cfg.S3,
+		SFTP:       cfg.SFTP,
+	})
+	if err != nil {
+		return fmt.Errorf("initialize backend <%s> %w", cfg.Backend, err)
+	}
+
+	// 3. Initialize cache.
+	c := cache.New(p.logger,
+		storage.New(p.logger, b, cfg.StorageOperationTimeout),
+		archive.FromFormat(p.logger, cfg.ArchiveFormat,
+			archive.WithSkipSymlinks(cfg.SkipSymlinks),
+			archive.WithCompressionLevel(cfg.CompressionLevel),
+		),
+		generator,
+		options...,
+	)
+
+	// 4.
Select mode + if cfg.Rebuild { + if err := c.Rebuild(p.Config.Mount); err != nil { + level.Debug(p.logger).Log("err", fmt.Sprintf("%+v\n", err)) + return Error(fmt.Sprintf("[IMPORTANT] build cache, %+v\n", err)) + } + } + + if cfg.Restore { + if err := c.Restore(p.Config.Mount); err != nil { + level.Debug(p.logger).Log("err", fmt.Sprintf("%+v\n", err)) + return Error(fmt.Sprintf("[IMPORTANT] restore cache, %+v\n", err)) + } + } + + // FLUSH + + return nil +} diff --git a/internal/plugin/plugin_test.go b/internal/plugin/plugin_test.go new file mode 100644 index 00000000..73da6e2c --- /dev/null +++ b/internal/plugin/plugin_test.go @@ -0,0 +1,450 @@ +// +build integration + +package plugin + +import ( + "context" + "crypto/tls" + "fmt" + "net/http" + "os" + "path/filepath" + "strconv" + "strings" + "testing" + "time" + + gcstorage "cloud.google.com/go/storage" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" + awss3 "github.com/aws/aws-sdk-go/service/s3" + "github.com/go-kit/kit/log" + pkgsftp "github.com/pkg/sftp" + "golang.org/x/crypto/ssh" + "google.golang.org/api/option" + + "github.com/meltwater/drone-cache/archive" + "github.com/meltwater/drone-cache/internal/metadata" + "github.com/meltwater/drone-cache/storage/backend" + "github.com/meltwater/drone-cache/storage/backend/azure" + "github.com/meltwater/drone-cache/storage/backend/filesystem" + "github.com/meltwater/drone-cache/storage/backend/gcs" + "github.com/meltwater/drone-cache/storage/backend/s3" + "github.com/meltwater/drone-cache/storage/backend/sftp" + "github.com/meltwater/drone-cache/test" +) + +const ( + testRoot = "testdata" + testRootMounted = "testdata/mounted" + testRootMoved = "testdata/moved" + defaultStorageOperationTimeout = 5 * time.Second + defaultPublicHost = "localhost:4443" +) + +var publicHost = getEnv("TEST_STORAGE_EMULATOR_HOST", defaultPublicHost) + +type setupBackend func(*testing.T, *Config, string) + +var ( + backends = map[string]setupBackend{ + backend.Azure: setupAzure, + backend.FileSystem: setupFileSystem, + backend.GCS: setupGCS, + backend.S3: setupS3, + backend.SFTP: setupSFTP, + } + + formats = []string{ + archive.Gzip, + archive.Tar, + } +) + +func TestPlugin(t *testing.T) { + test.Ok(t, os.MkdirAll(testRootMounted, 0755)) + test.Ok(t, os.MkdirAll(testRootMoved, 0755)) + t.Cleanup(func() { + os.RemoveAll(testRoot) + os.Unsetenv("STORAGE_EMULATOR_HOST") // NOTICE: Only needed for GCS + }) + + cases := []struct { + name string + mount func(string) []string + cacheKey string + success bool + }{ + { + name: "existing mount", + mount: func(name string) []string { + return exampleFileTree(t, name, make([]byte, 1*1024)) + }, + success: true, + }, + { + name: "non-existing mount", + mount: func(_ string) []string { + return []string{"idonotexist"} + }, + success: false, + }, + { + name: "empty mount", + mount: func(name string) []string { + return []string{exampleDir(t, name)} + }, + success: true, + }, + { + name: "existing mount with cache key", + mount: func(name string) []string { + return exampleFileTree(t, name, make([]byte, 1*1024)) + }, + cacheKey: "{{ .Repo.Name }}_{{ .Commit.Branch }}_{{ .Build.Number }}", + success: true, + }, + { + name: "existing mount with symlink", + mount: func(name string) []string { + return exampleFileTreeWithSymlinks(t, name, make([]byte, 1*1024)) + }, + success: true, + }, + // NOTICE: Slows down test runs significantly, disabled for now. Will be introduced with a special flag. 
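+		// Until that flag exists, a conventional way to gate such a case
+		// from inside the test body would be (sketch):
+		//
+		//   if testing.Short() {
+		//       t.Skip("skipping large file case in short mode")
+		//   }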
+ // { + // name: "existing mount with large file", + // mount: func(name string) []string { + // return exampleFileTree(t, "existing", make([]byte, 1*1024*1024)) + // }, + // success: true, + // }, + } + + for i, tc := range cases { + i, tc := i, tc // NOTICE: https://github.com/golang/go/wiki/CommonMistakes#using-goroutines-on-loop-iterator-variables. + for _, fmt := range formats { + for b, setup := range backends { + name := strings.Join([]string{strconv.Itoa(i), tc.name, b, fmt}, "-") + t.Run(name, func(t *testing.T) { + // Setup + c := defaultConfig() + setup(t, c, name) + paths := tc.mount(tc.name) + mount(c, paths...) + cacheKey(c, tc.cacheKey) + format(c, fmt) + + // Rebuild run + { + plugin := newPlugin(rebuild(c)) + if !tc.success { + test.NotOk(t, plugin.Exec()) + return + } + + test.Ok(t, plugin.Exec()) + } + + // Move source to compare later + dir, cleanup := test.CreateTempDir(t, sanitize(name), testRootMoved) + t.Cleanup(cleanup) + + for _, p := range paths { + rel, err := filepath.Rel(testRootMounted, p) + test.Ok(t, err) + dst := filepath.Join(dir, rel) + test.Ok(t, os.MkdirAll(filepath.Dir(dst), 0755)) + test.Ok(t, os.Rename(p, dst)) + } + + // Restore run + { + if _, ok := os.LookupEnv("STORAGE_EMULATOR_HOST"); !ok { // NOTICE: Only needed for GCS + test.Ok(t, os.Setenv("STORAGE_EMULATOR_HOST", publicHost)) + } + + plugin := newPlugin(restore(c)) + test.Ok(t, plugin.Exec()) + + test.Ok(t, os.Unsetenv("STORAGE_EMULATOR_HOST")) // NOTICE: Only needed for GCS + } + + // Compare + test.EqualDirs(t, dir, testRootMounted, paths) + }) + } + } + } +} + +// Plugin configuration + +func defaultConfig() *Config { + return &Config{ + CompressionLevel: archive.DefaultCompressionLevel, + StorageOperationTimeout: defaultStorageOperationTimeout, + } +} + +func rebuild(c *Config) *Config { + c.Restore = false + c.Rebuild = true + return c +} + +func restore(c *Config) *Config { + c.Restore = true + c.Rebuild = false + return c +} + +func mount(c *Config, mount ...string) *Config { + c.Mount = mount + return c +} + +func cacheKey(c *Config, key string) *Config { + c.CacheKeyTemplate = key + return c +} + +func format(c *Config, fmt string) *Config { + c.ArchiveFormat = fmt + return c +} + +func newPlugin(c *Config) Plugin { + var logger log.Logger + if testing.Verbose() { + logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) + } else { + logger = log.NewNopLogger() + } + + return Plugin{ + logger: logger, + Metadata: metadata.Metadata{ + Repo: metadata.Repo{ + Branch: "master", + Name: "drone-cache", + }, + Commit: metadata.Commit{ + Branch: "master", + }, + }, + Config: *c, + } +} + +// Fixtures + +func exampleDir(t *testing.T, name string) string { + name = sanitize(name) + + dir, cleanup := test.CreateTempDir(t, name, testRootMounted) + t.Cleanup(cleanup) + + return dir +} + +func exampleFileTree(t *testing.T, name string, content []byte) []string { + name = sanitize(name) + + file, fileClean := test.CreateTempFile(t, name, content, testRootMounted) + t.Cleanup(fileClean) + + dir, dirClean := test.CreateTempFilesInDir(t, name, content, testRootMounted) + t.Cleanup(dirClean) + + return []string{file, dir} +} + +func exampleFileTreeWithSymlinks(t *testing.T, name string, content []byte) []string { + name = sanitize(name) + + file, fileClean := test.CreateTempFile(t, name, content, testRootMounted) + t.Cleanup(fileClean) + + dir, dirClean := test.CreateTempFilesInDir(t, name, content, testRootMounted) + t.Cleanup(dirClean) + + symDir, cleanup := test.CreateTempDir(t, name, 
testRootMounted) + t.Cleanup(cleanup) + + symlink := filepath.Join(symDir, name+"_symlink.testfile") + test.Ok(t, os.Symlink(file, symlink)) + t.Cleanup(func() { os.Remove(symlink) }) + + return []string{file, dir, symDir} +} + +// Setup + +func setupAzure(t *testing.T, c *Config, name string) { + const ( + defaultBlobStorageURL = "127.0.0.1:10000" + defaultAccountName = "devstoreaccount1" + defaultAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==" + ) + + var ( + blobURL = getEnv("TEST_AZURITE_URL", defaultBlobStorageURL) + accountName = getEnv("TEST_ACCOUNT_NAME", defaultAccountName) + accountKey = getEnv("TEST_ACCOUNT_KEY", defaultAccountKey) + ) + + c.Backend = backend.Azure + c.Azure = azure.Config{ + AccountName: accountName, + AccountKey: accountKey, + ContainerName: name, + BlobStorageURL: blobURL, + Azurite: true, + Timeout: defaultStorageOperationTimeout, + } +} + +func setupFileSystem(t *testing.T, c *Config, name string) { + dir, cleanup := test.CreateTempDir(t, "filesystem-cache-root-"+sanitize(name), "testdata") + t.Cleanup(cleanup) + + c.Backend = backend.FileSystem + c.FileSystem = filesystem.Config{CacheRoot: dir} +} + +func setupGCS(t *testing.T, c *Config, name string) { + const ( + defaultEndpoint = "http://127.0.0.1:4443/storage/v1/" + defaultApiKey = "" + ) + + var ( + endpoint = getEnv("TEST_GCS_ENDPOINT", defaultEndpoint) + apiKey = getEnv("TEST_GCS_API_KEY", defaultApiKey) + bucketName = sanitize(name) + opts []option.ClientOption + ) + + if apiKey != "" { + opts = append(opts, option.WithAPIKey(apiKey)) + } else { + opts = append(opts, option.WithoutAuthentication()) + } + opts = append(opts, option.WithEndpoint(endpoint)) + opts = append(opts, option.WithHTTPClient(&http.Client{Transport: &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // ignore expired SSL certificates. + }})) + + client, err := gcstorage.NewClient(context.Background(), opts...) + test.Ok(t, err) + + bucket := client.Bucket(bucketName) + test.Ok(t, bucket.Create(context.Background(), "drone-cache", &gcstorage.BucketAttrs{})) + t.Cleanup(func() { client.Close() }) + + c.Backend = backend.GCS + c.GCS = gcs.Config{ + Bucket: bucketName, + Endpoint: endpoint, + APIKey: apiKey, + Timeout: defaultStorageOperationTimeout, + } +} + +func setupS3(t *testing.T, c *Config, name string) { + const ( + defaultEndpoint = "127.0.0.1:9000" + defaultAccessKey = "AKIAIOSFODNN7EXAMPLE" + defaultSecretAccessKey = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" + defaultRegion = "eu-west-1" + ) + var ( + endpoint = getEnv("TEST_S3_ENDPOINT", defaultEndpoint) + accessKey = getEnv("TEST_S3_ACCESS_KEY", defaultAccessKey) + secretAccessKey = getEnv("TEST_S3_SECRET_KEY", defaultSecretAccessKey) + bucket = sanitize(name) + ) + client := awss3.New(session.Must(session.NewSessionWithOptions(session.Options{})), &aws.Config{ + Region: aws.String(defaultRegion), + Endpoint: aws.String(endpoint), + DisableSSL: aws.Bool(!strings.HasPrefix(endpoint, "https://")), + S3ForcePathStyle: aws.Bool(true), + Credentials: credentials.NewStaticCredentials(accessKey, secretAccessKey, ""), + }) + + _, err := client.CreateBucketWithContext(context.Background(), &awss3.CreateBucketInput{ + Bucket: aws.String(bucket), + }) + test.Ok(t, err) + + c.Backend = backend.S3 + c.S3 = s3.Config{ + ACL: "private", + Bucket: bucket, + Endpoint: endpoint, + Key: accessKey, + PathStyle: true, // Should be true for minio and false for AWS. 
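+		// Path-style requests address objects as https://<endpoint>/<bucket>/<key>;
+		// virtual-hosted style uses https://<bucket>.<endpoint>/<key>. MinIO
+		// serves path-style by default, hence true here.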
+ Region: defaultRegion, + Secret: secretAccessKey, + } +} + +func setupSFTP(t *testing.T, c *Config, name string) { + const ( + defaultSFTPHost = "127.0.0.1" + defaultSFTPPort = "22" + defaultUsername = "foo" + defaultPassword = "pass" + defaultCacheRoot = "/upload" + ) + + var ( + host = getEnv("TEST_SFTP_HOST", defaultSFTPHost) + port = getEnv("TEST_SFTP_PORT", defaultSFTPPort) + username = getEnv("TEST_SFTP_USERNAME", defaultUsername) + password = getEnv("TEST_SFTP_PASSWORD", defaultPassword) + cacheRoot = filepath.Join(getEnv("TEST_SFTP_CACHE_ROOT", defaultCacheRoot), "sft-cache-root-"+sanitize(name)) + ) + + /* #nosec */ + sshClient, err := ssh.Dial("tcp", fmt.Sprintf("%s:%s", host, port), &ssh.ClientConfig{ + User: username, + Auth: []ssh.AuthMethod{ssh.Password(password)}, + HostKeyCallback: ssh.InsecureIgnoreHostKey(), // #nosec TODO(kakkoyun) just a workaround for now, will fix + }) + test.Ok(t, err) + + client, err := pkgsftp.NewClient(sshClient) + test.Ok(t, err) + + test.Ok(t, client.MkdirAll(cacheRoot)) + t.Cleanup(func() { client.RemoveDirectory(cacheRoot) }) + + c.Backend = backend.SFTP + c.SFTP = sftp.Config{ + CacheRoot: cacheRoot, + Username: username, + Auth: sftp.SSHAuth{ + Password: password, + Method: sftp.SSHAuthMethodPassword, + }, + Host: host, + Port: port, + } +} + +// Helpers + +func sanitize(p string) string { + return strings.ReplaceAll(strings.TrimSpace(strings.ToLower(p)), " ", "-") +} + +func getEnv(key, defaultVal string) string { + value, ok := os.LookupEnv(key) + if !ok { + return defaultVal + } + return value +} diff --git a/main.go b/main.go index a6007630..ad4f7c0a 100644 --- a/main.go +++ b/main.go @@ -5,14 +5,21 @@ import ( stdlog "log" "os" - "github.com/meltwater/drone-cache/cache" - "github.com/meltwater/drone-cache/cache/backend" + "github.com/meltwater/drone-cache/archive" "github.com/meltwater/drone-cache/internal" - "github.com/meltwater/drone-cache/metadata" - "github.com/meltwater/drone-cache/plugin" - + "github.com/meltwater/drone-cache/internal/metadata" + "github.com/meltwater/drone-cache/internal/plugin" + "github.com/meltwater/drone-cache/storage" + "github.com/meltwater/drone-cache/storage/backend" + "github.com/meltwater/drone-cache/storage/backend/azure" + "github.com/meltwater/drone-cache/storage/backend/filesystem" + "github.com/meltwater/drone-cache/storage/backend/gcs" + "github.com/meltwater/drone-cache/storage/backend/s3" + "github.com/meltwater/drone-cache/storage/backend/sftp" + + "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" - "github.com/urfave/cli" + "github.com/urfave/cli/v2" ) var version = "0.0.0" @@ -25,369 +32,414 @@ func main() { app.Action = run app.Version = version app.Flags = []cli.Flag{ - // Logger args - - cli.StringFlag{ - Name: "log.level, ll", - Usage: "log filtering level. ('error', 'warn', 'info', 'debug')", - Value: internal.LogLevelInfo, - EnvVar: "PLUGIN_LOG_LEVEL, LOG_LEVEL", - }, - cli.StringFlag{ - Name: "log.format, lf", - Usage: "log format to use. 
('logfmt', 'json')", - Value: internal.LogFormatLogfmt, - EnvVar: "PLUGIN_LOG_FORMAT, LOG_FORMAT", - }, - - // Repo args - - cli.StringFlag{ - Name: "repo.fullname, rf", - Usage: "repository full name", - EnvVar: "DRONE_REPO", - }, - cli.StringFlag{ - Name: "repo.namespace, rns", - Usage: "repository namespace", - EnvVar: "DRONE_REPO_NAMESPACE", - }, - cli.StringFlag{ - Name: "repo.owner, ro", - Usage: "repository owner (for Drone version < 1.0)", - EnvVar: "DRONE_REPO_OWNER", - }, - cli.StringFlag{ - Name: "repo.name, rn", - Usage: "repository name", - EnvVar: "DRONE_REPO_NAME", - }, - cli.StringFlag{ - Name: "repo.link, rl", - Usage: "repository link", - EnvVar: "DRONE_REPO_LINK", - }, - cli.StringFlag{ - Name: "repo.avatar, ra", - Usage: "repository avatar", - EnvVar: "DRONE_REPO_AVATAR", - }, - cli.StringFlag{ - Name: "repo.branch, rb", - Usage: "repository default branch", - EnvVar: "DRONE_REPO_BRANCH", - }, - cli.BoolFlag{ - Name: "repo.private, rp", - Usage: "repository is private", - EnvVar: "DRONE_REPO_PRIVATE", - }, - cli.BoolFlag{ - Name: "repo.trusted, rt", - Usage: "repository is trusted", - EnvVar: "DRONE_REPO_TRUSTED", - }, - - // Commit args - - cli.StringFlag{ - Name: "remote.url, remu", - Usage: "git remote url", - EnvVar: "DRONE_REMOTE_URL", - }, - cli.StringFlag{ - Name: "commit.sha, cs", - Usage: "git commit sha", - EnvVar: "DRONE_COMMIT_SHA", - }, - cli.StringFlag{ - Name: "commit.ref, cr", - Value: "refs/heads/master", - Usage: "git commit ref", - EnvVar: "DRONE_COMMIT_REF", - }, - cli.StringFlag{ - Name: "commit.branch, cb", - Value: "master", - Usage: "git commit branch", - EnvVar: "DRONE_COMMIT_BRANCH", - }, - cli.StringFlag{ - Name: "commit.message, cm", - Usage: "git commit message", - EnvVar: "DRONE_COMMIT_MESSAGE", - }, - cli.StringFlag{ - Name: "commit.link, cl", - Usage: "git commit link", - EnvVar: "DRONE_COMMIT_LINK", - }, - cli.StringFlag{ - Name: "commit.author.name, an", - Usage: "git author name", - EnvVar: "DRONE_COMMIT_AUTHOR", - }, - cli.StringFlag{ - Name: "commit.author.email, ae", - Usage: "git author email", - EnvVar: "DRONE_COMMIT_AUTHOR_EMAIL", - }, - cli.StringFlag{ - Name: "commit.author.avatar, aa", - Usage: "git author avatar", - EnvVar: "DRONE_COMMIT_AUTHOR_AVATAR", - }, - - // Build args - - cli.StringFlag{ - Name: "build.event, be", - Value: "push", - Usage: "build event", - EnvVar: "DRONE_BUILD_EVENT", - }, - cli.IntFlag{ - Name: "build.number, bn", - Usage: "build number", - EnvVar: "DRONE_BUILD_NUMBER", - }, - cli.IntFlag{ - Name: "build.created, bc", - Usage: "build created", - EnvVar: "DRONE_BUILD_CREATED", - }, - cli.IntFlag{ - Name: "build.started, bs", - Usage: "build started", - EnvVar: "DRONE_BUILD_STARTED", - }, - cli.IntFlag{ - Name: "build.finished, bf", - Usage: "build finished", - EnvVar: "DRONE_BUILD_FINISHED", - }, - cli.StringFlag{ - Name: "build.status, bstat", - Usage: "build status", - Value: "success", - EnvVar: "DRONE_BUILD_STATUS", - }, - cli.StringFlag{ - Name: "build.link, bl", - Usage: "build link", - EnvVar: "DRONE_BUILD_LINK", - }, - cli.StringFlag{ - Name: "build.deploy, db", - Usage: "build deployment target", - EnvVar: "DRONE_DEPLOY_TO", - }, - cli.BoolFlag{ - Name: "yaml.verified, yv", - Usage: "build yaml is verified", - EnvVar: "DRONE_YAML_VERIFIED", - }, - cli.BoolFlag{ - Name: "yaml.signed, ys", - Usage: "build yaml is signed", - EnvVar: "DRONE_YAML_SIGNED", - }, - - // Prev build args - - cli.IntFlag{ - Name: "prev.build.number, pbn", - Usage: "previous build number", - EnvVar: 
"DRONE_PREV_BUILD_NUMBER", - }, - cli.StringFlag{ - Name: "prev.build.status, pbst", - Usage: "previous build status", - EnvVar: "DRONE_PREV_BUILD_STATUS", - }, - cli.StringFlag{ - Name: "prev.commit.sha, pcs", - Usage: "previous build sha", - EnvVar: "DRONE_PREV_COMMIT_SHA", - }, - - // Config args - - cli.StringFlag{ - Name: "backend, b", - Usage: "cache backend to use in plugin (s3, filesystem)", - Value: "s3", - EnvVar: "PLUGIN_BACKEND", - }, - - cli.StringSliceFlag{ - Name: "mount, m", - Usage: "cache directories, an array of folders to cache", - EnvVar: "PLUGIN_MOUNT", - }, - cli.BoolFlag{ - Name: "rebuild, reb", - Usage: "rebuild the cache directories", - EnvVar: "PLUGIN_REBUILD", - }, - cli.BoolFlag{ - Name: "restore, res", - Usage: "restore the cache directories", - EnvVar: "PLUGIN_RESTORE", - }, - cli.StringFlag{ - Name: "cache-key, chk", - Usage: "cache key to use for the cache directories", - EnvVar: "PLUGIN_CACHE_KEY", - }, - cli.StringFlag{ - Name: "archive-format, arcfmt", - Usage: "archive format to use to store the cache directories (tar, gzip)", - Value: cache.DefaultArchiveFormat, - EnvVar: "PLUGIN_ARCHIVE_FORMAT", - }, - cli.IntFlag{ + // Logger flags + + &cli.StringFlag{ + Name: "log.level, ll", + Usage: "log filtering level. ('error', 'warn', 'info', 'debug')", + Value: internal.LogLevelInfo, + EnvVars: []string{"PLUGIN_LOG_LEVEL", "LOG_LEVEL"}, + }, + &cli.StringFlag{ + Name: "log.format, lf", + Usage: "log format to use. ('logfmt', 'json')", + Value: internal.LogFormatLogfmt, + EnvVars: []string{"PLUGIN_LOG_FORMAT", "LOG_FORMAT"}, + }, + + // Repo flags + + &cli.StringFlag{ + Name: "repo.fullname, rf", + Usage: "repository full name", + EnvVars: []string{"DRONE_REPO"}, + }, + &cli.StringFlag{ + Name: "repo.namespace, rns", + Usage: "repository namespace", + EnvVars: []string{"DRONE_REPO_NAMESPACE"}, + }, + &cli.StringFlag{ + Name: "repo.owner, ro", + Usage: "repository owner (for Drone version < 1.0)", + EnvVars: []string{"DRONE_REPO_OWNER"}, + }, + &cli.StringFlag{ + Name: "repo.name, rn", + Usage: "repository name", + EnvVars: []string{"DRONE_REPO_NAME"}, + }, + &cli.StringFlag{ + Name: "repo.link, rl", + Usage: "repository link", + EnvVars: []string{"DRONE_REPO_LINK"}, + }, + &cli.StringFlag{ + Name: "repo.avatar, ra", + Usage: "repository avatar", + EnvVars: []string{"DRONE_REPO_AVATAR"}, + }, + &cli.StringFlag{ + Name: "repo.branch, rb", + Usage: "repository default branch", + EnvVars: []string{"DRONE_REPO_BRANCH"}, + }, + &cli.BoolFlag{ + Name: "repo.private, rp", + Usage: "repository is private", + EnvVars: []string{"DRONE_REPO_PRIVATE"}, + }, + &cli.BoolFlag{ + Name: "repo.trusted, rt", + Usage: "repository is trusted", + EnvVars: []string{"DRONE_REPO_TRUSTED"}, + }, + + // Commit flags + + &cli.StringFlag{ + Name: "remote.url, remu", + Usage: "git remote url", + EnvVars: []string{"DRONE_REMOTE_URL"}, + }, + &cli.StringFlag{ + Name: "commit.sha, cs", + Usage: "git commit sha", + EnvVars: []string{"DRONE_COMMIT_SHA"}, + }, + &cli.StringFlag{ + Name: "commit.ref, cr", + Value: "refs/heads/master", + Usage: "git commit ref", + EnvVars: []string{"DRONE_COMMIT_REF"}, + }, + &cli.StringFlag{ + Name: "commit.branch, cb", + Value: "master", + Usage: "git commit branch", + EnvVars: []string{"DRONE_COMMIT_BRANCH"}, + }, + &cli.StringFlag{ + Name: "commit.message, cm", + Usage: "git commit message", + EnvVars: []string{"DRONE_COMMIT_MESSAGE"}, + }, + &cli.StringFlag{ + Name: "commit.link, cl", + Usage: "git commit link", + EnvVars: []string{"DRONE_COMMIT_LINK"}, + }, + 
&cli.StringFlag{ + Name: "commit.author.name, an", + Usage: "git author name", + EnvVars: []string{"DRONE_COMMIT_AUTHOR"}, + }, + &cli.StringFlag{ + Name: "commit.author.email, ae", + Usage: "git author email", + EnvVars: []string{"DRONE_COMMIT_AUTHOR_EMAIL"}, + }, + &cli.StringFlag{ + Name: "commit.author.avatar, aa", + Usage: "git author avatar", + EnvVars: []string{"DRONE_COMMIT_AUTHOR_AVATAR"}, + }, + + // Build flags + + &cli.StringFlag{ + Name: "build.event, be", + Value: "push", + Usage: "build event", + EnvVars: []string{"DRONE_BUILD_EVENT"}, + }, + &cli.IntFlag{ + Name: "build.number, bn", + Usage: "build number", + EnvVars: []string{"DRONE_BUILD_NUMBER"}, + }, + &cli.IntFlag{ + Name: "build.created, bc", + Usage: "build created", + EnvVars: []string{"DRONE_BUILD_CREATED"}, + }, + &cli.IntFlag{ + Name: "build.started, bs", + Usage: "build started", + EnvVars: []string{"DRONE_BUILD_STARTED"}, + }, + &cli.IntFlag{ + Name: "build.finished, bf", + Usage: "build finished", + EnvVars: []string{"DRONE_BUILD_FINISHED"}, + }, + &cli.StringFlag{ + Name: "build.status, bstat", + Usage: "build status", + Value: "success", + EnvVars: []string{"DRONE_BUILD_STATUS"}, + }, + &cli.StringFlag{ + Name: "build.link, bl", + Usage: "build link", + EnvVars: []string{"DRONE_BUILD_LINK"}, + }, + &cli.StringFlag{ + Name: "build.deploy, db", + Usage: "build deployment target", + EnvVars: []string{"DRONE_DEPLOY_TO"}, + }, + &cli.BoolFlag{ + Name: "yaml.verified, yv", + Usage: "build yaml is verified", + EnvVars: []string{"DRONE_YAML_VERIFIED"}, + }, + &cli.BoolFlag{ + Name: "yaml.signed, ys", + Usage: "build yaml is signed", + EnvVars: []string{"DRONE_YAML_SIGNED"}, + }, + + // Prev build flags + + &cli.IntFlag{ + Name: "prev.build.number, pbn", + Usage: "previous build number", + EnvVars: []string{"DRONE_PREV_BUILD_NUMBER"}, + }, + &cli.StringFlag{ + Name: "prev.build.status, pbst", + Usage: "previous build status", + EnvVars: []string{"DRONE_PREV_BUILD_STATUS"}, + }, + &cli.StringFlag{ + Name: "prev.commit.sha, pcs", + Usage: "previous build sha", + EnvVars: []string{"DRONE_PREV_COMMIT_SHA"}, + }, + + // Config flags + + &cli.StringFlag{ + Name: "backend, b", + Usage: "cache backend to use in plugin (s3, filesystem, sftp, azure, gcs)", + Value: backend.S3, + EnvVars: []string{"PLUGIN_BACKEND"}, + }, + &cli.StringSliceFlag{ + Name: "mount, m", + Usage: "cache directories, an array of folders to cache", + EnvVars: []string{"PLUGIN_MOUNT"}, + }, + &cli.BoolFlag{ + Name: "rebuild, reb", + Usage: "rebuild the cache directories", + EnvVars: []string{"PLUGIN_REBUILD"}, + }, + &cli.BoolFlag{ + Name: "restore, res", + Usage: "restore the cache directories", + EnvVars: []string{"PLUGIN_RESTORE"}, + }, + // RESTORE + &cli.StringFlag{ + Name: "cache-key, chk", + Usage: "cache key to use for the cache directories", + EnvVars: []string{"PLUGIN_CACHE_KEY"}, + }, + // CACHE-KEYS + // REBUILD-KEYS + // RESTORE-KEYS + &cli.StringFlag{ + Name: "archive-format, arcfmt", + Usage: "archive format to use to store the cache directories (tar, gzip)", + Value: archive.DefaultArchiveFormat, + EnvVars: []string{"PLUGIN_ARCHIVE_FORMAT"}, + }, + &cli.IntFlag{ Name: "compression-level, cpl", Usage: `compression level to use for gzip compression when archive-format specified as gzip (check https://godoc.org/compress/flate#pkg-constants for available options)`, - Value: cache.DefaultCompressionLevel, - EnvVar: "PLUGIN_COMPRESSION_LEVEL", + Value: archive.DefaultCompressionLevel, + EnvVars: []string{"PLUGIN_COMPRESSION_LEVEL"}, }, - 
cli.BoolFlag{
-			Name:   "skip-symlinks, ss",
-			Usage:  "skip symbolic links in archive",
-			EnvVar: "PLUGIN_SKIP_SYMLINKS, SKIP_SYMLINKS",
+		&cli.BoolFlag{
+			Name:    "skip-symlinks, ss",
+			Usage:   "skip symbolic links in archive",
+			EnvVars: []string{"PLUGIN_SKIP_SYMLINKS", "SKIP_SYMLINKS"},
		},
-		cli.BoolFlag{
-			Name:   "debug, d",
-			Usage:  "debug",
-			EnvVar: "PLUGIN_DEBUG, DEBUG",
+		&cli.BoolFlag{
+			Name:    "debug, d",
+			Usage:   "debug",
+			EnvVars: []string{"PLUGIN_DEBUG", "DEBUG"},
		},
-		cli.BoolFlag{
-			Name:   "exit-code, ex",
-			Usage:  "always exit with exit code, disable silent fails for known errors",
-			Hidden: true,
-			EnvVar: "PLUGIN_EXIT_CODE, EXIT_CODE",
+		&cli.BoolFlag{
+			Name:    "exit-code, ex",
+			Usage:   "always exit with exit code, disable silent fails for known errors",
+			Hidden:  true,
+			EnvVars: []string{"PLUGIN_EXIT_CODE", "EXIT_CODE"},
		},
 
-		// Volume specific Config args
+		// Backends Configs
+
+		// Shared Config flags
 
-		cli.StringFlag{
-			Name:   "filesystem-cache-root, fcr",
-			Usage:  "local filesystem root directory for the filesystem cache",
-			Value:  "/tmp/cache",
-			EnvVar: "PLUGIN_FILESYSTEM_CACHE_ROOT, FILESYSTEM_CACHE_ROOT",
+		&cli.DurationFlag{
+			Name:    "backend.operation-timeout, stopt",
+			Usage:   "timeout value to use for each storage operation",
+			Value:   storage.DefaultOperationTimeout,
+			EnvVars: []string{"PLUGIN_BACKEND_OPERATION_TIMEOUT", "BACKEND_OPERATION_TIMEOUT"},
+		},
+		&cli.StringFlag{
+			Name:    "endpoint, e",
+			Usage:   "endpoint for the s3/cloud storage connection",
+			EnvVars: []string{"PLUGIN_ENDPOINT", "S3_ENDPOINT", "GCS_ENDPOINT"},
+		},
+		&cli.StringFlag{
+			Name:    "bucket, bckt",
+			Usage:   "AWS bucket name",
+			EnvVars: []string{"PLUGIN_BUCKET", "S3_BUCKET", "GCS_BUCKET"},
		},
 
-		// S3 specific Config args
+		// Volume specific Config flags
 
-		cli.StringFlag{
-			Name:   "endpoint, e",
-			Usage:  "endpoint for the s3/cloud storage connection",
-			EnvVar: "PLUGIN_ENDPOINT,S3_ENDPOINT,CLOUD_STORAGE_ENDPOINT",
+		&cli.StringFlag{
+			Name:    "filesystem.cache-root, fcr",
+			Usage:   "local filesystem root directory for the filesystem cache",
+			Value:   "/tmp/cache",
+			EnvVars: []string{"PLUGIN_FILESYSTEM_CACHE_ROOT", "FILESYSTEM_CACHE_ROOT"},
		},
-		cli.StringFlag{
-			Name:   "access-key, akey",
-			Usage:  "AWS access key",
-			EnvVar: "PLUGIN_ACCESS_KEY,AWS_ACCESS_KEY_ID,CACHE_AWS_ACCESS_KEY_ID",
+
+		// S3 specific Config flags
+
+		&cli.StringFlag{
+			Name:    "access-key, akey",
+			Usage:   "AWS access key",
+			EnvVars: []string{"PLUGIN_ACCESS_KEY", "AWS_ACCESS_KEY_ID", "CACHE_AWS_ACCESS_KEY_ID"},
		},
-		cli.StringFlag{
-			Name:   "secret-key, skey",
-			Usage:  "AWS/GCP secret key",
-			EnvVar: "PLUGIN_SECRET_KEY,AWS_SECRET_ACCESS_KEY,CACHE_AWS_SECRET_ACCESS_KEY,GCP_API_KEY",
+		&cli.StringFlag{
+			Name:    "secret-key, skey",
+			Usage:   "AWS secret key",
+			EnvVars: []string{"PLUGIN_SECRET_KEY", "AWS_SECRET_ACCESS_KEY", "CACHE_AWS_SECRET_ACCESS_KEY"},
		},
-		cli.StringFlag{
-			Name:   "bucket, bckt",
-			Usage:  "AWS bucket name",
-			EnvVar: "PLUGIN_BUCKET,S3_BUCKET,CLOUD_STORAGE_BUCKET",
+		&cli.StringFlag{
+			Name:    "region, reg",
+			Usage:   "AWS bucket region. (us-east-1, eu-west-1, ...)",
+			EnvVars: []string{"PLUGIN_REGION", "S3_REGION"},
		},
-		cli.StringFlag{
-			Name:   "region, reg",
-			Usage:  "AWS bucket region. (us-east-1, eu-west-1, ...)",
-			EnvVar: "PLUGIN_REGION,S3_REGION",
+		&cli.BoolFlag{
+			Name:    "path-style, ps",
+			Usage:   "AWS path style to use for bucket paths. (true for minio, false for aws)",
+			EnvVars: []string{"PLUGIN_PATH_STYLE", "AWS_PLUGIN_PATH_STYLE"},
		},
-		cli.BoolFlag{
-			Name:   "path-style, ps",
-			Usage:  "use path style for bucket paths. (true for minio, false for aws)",
-			EnvVar: "PLUGIN_PATH_STYLE",
+		&cli.StringFlag{
+			Name:    "acl",
+			Usage:   "upload files with acl (private, public-read, ...)",
+			Value:   "private",
+			EnvVars: []string{"PLUGIN_ACL", "AWS_ACL"},
		},
-		cli.StringFlag{
-			Name:   "acl",
-			Usage:  "upload files with acl (private, public-read, ...)",
-			Value:  "private",
-			EnvVar: "PLUGIN_ACL",
+		&cli.StringFlag{
+			Name:    "encryption, enc",
+			Usage:   "server-side encryption algorithm, defaults to none. (AES256, aws:kms)",
+			EnvVars: []string{"PLUGIN_ENCRYPTION", "AWS_ENCRYPTION"},
		},
-		cli.StringFlag{
-			Name:   "encryption, enc",
-			Usage:  "server-side encryption algorithm, defaults to none. (AES256, aws:kms)",
-			EnvVar: "PLUGIN_ENCRYPTION",
+
+		// GCS specific Config flags
+
+		&cli.StringFlag{
+			Name:    "gcs.api-key",
+			Usage:   "Google service account API key",
+			EnvVars: []string{"PLUGIN_API_KEY", "GCP_API_KEY"},
+		},
+		&cli.StringFlag{
+			Name:    "gcs.json-key",
+			Usage:   "Google service account JSON key",
+			EnvVars: []string{"PLUGIN_JSON_KEY", "GCS_CACHE_JSON_KEY"},
+		},
+		&cli.StringFlag{
+			Name:    "gcs.acl, gacl",
+			Usage:   "upload files with acl (private, public-read, ...)",
+			Value:   "private",
+			EnvVars: []string{"PLUGIN_GCS_ACL", "GCS_ACL"},
+		},
+		&cli.StringFlag{
+			Name: "gcs.encryption-key, genc",
+			Usage: `server-side encryption key, must be a 32-byte AES-256 key, defaults to none
+			(See https://cloud.google.com/storage/docs/encryption for details.)`,
+			EnvVars: []string{"PLUGIN_GCS_ENCRYPTION_KEY", "GCS_ENCRYPTION_KEY"},
		},
 
		// Azure specific Config flags
 
-		cli.StringFlag{
-			Name:   "azure-account-name",
-			Usage:  "Azure Blob Storage Account Name",
-			EnvVar: "PLUGIN_ACCOUNT_NAME,AZURE_ACCOUNT_NAME",
+		&cli.StringFlag{
+			Name:    "azure.account-name",
+			Usage:   "Azure Blob Storage Account Name",
+			EnvVars: []string{"PLUGIN_ACCOUNT_NAME", "AZURE_ACCOUNT_NAME"},
+		},
+		&cli.StringFlag{
+			Name:    "azure.account-key",
+			Usage:   "Azure Blob Storage Account Key",
+			EnvVars: []string{"PLUGIN_ACCOUNT_KEY", "AZURE_ACCOUNT_KEY"},
		},
-		cli.StringFlag{
-			Name:   "azure-account-key",
-			Usage:  "Azure Blob Storage Account Key",
-			EnvVar: "PLUGIN_ACCOUNT_KEY,AZURE_ACCOUNT_KEY",
+		&cli.StringFlag{
+			Name:    "azure.blob-container-name",
+			Usage:   "Azure Blob Storage container name",
+			EnvVars: []string{"PLUGIN_CONTAINER", "AZURE_CONTAINER_NAME"},
		},
-		cli.StringFlag{
-			Name:   "azure-container-name",
-			Usage:  "Azure Blob Storage container name",
-			EnvVar: "PLUGIN_CONTAINER,AZURE_CONTAINER_NAME",
+		&cli.StringFlag{
+			Name:    "azure.blob-storage-url",
+			Usage:   "Azure Blob Storage URL",
+			Value:   "blob.core.windows.net",
+			EnvVars: []string{"AZURE_BLOB_STORAGE_URL"},
		},
-		cli.StringFlag{
-			Name:   "azure-blob-storage-url",
-			Usage:  "Azure Blob Storage URL",
-			Value:  "blob.core.windows.net",
-			EnvVar: "AZURE_BLOB_STORAGE_URL",
+		&cli.IntFlag{
+			Name:    "azure.blob-max-retry-requests",
+			Usage:   "Azure Blob Storage Max Retry Requests",
+			EnvVars: []string{"AZURE_BLOB_MAX_RETRY_REQUESTS"},
+			Value:   azure.DefaultBlobMaxRetryRequests,
		},
 
		// SFTP specific Config flags
 
-		cli.StringFlag{
-			Name:   "sftp-cache-root",
-			Usage:  "sftp root directory",
-			EnvVar: "SFTP_CACHE_ROOT",
-		},
-		cli.StringFlag{
-			Name:   "sftp-username",
-			Usage:  "sftp username",
-			EnvVar: "SFTP_USERNAME",
-		},
-		cli.StringFlag{
-			Name:   "sftp-password",
-			Usage:  "sftp password",
-			EnvVar: "SFTP_PASSWORD",
-		},
-
cli.StringFlag{ - Name: "ftp-public-key-file", - Usage: "sftp public key file path", - EnvVar: "SFTP_PUBLIC_KEY_FILE", - }, - cli.StringFlag{ - Name: "sftp-auth-method", - Usage: "sftp auth method, defaults to none. (PASSWORD, PUBLIC_KEY_FILE)", - EnvVar: "SFTP_AUTH_METHOD", - }, - cli.StringFlag{ - Name: "sftp-host", - Usage: "sftp host", - EnvVar: "SFTP_HOST", - }, - cli.StringFlag{ - Name: "sftp-port", - Usage: "sftp port", - EnvVar: "SFTP_PORT", + &cli.StringFlag{ + Name: "sftp.cache-root", + Usage: "sftp root directory", + EnvVars: []string{"SFTP_CACHE_ROOT"}, + }, + &cli.StringFlag{ + Name: "sftp.username", + Usage: "sftp username", + EnvVars: []string{"PLUGIN_USERNAME", "SFTP_USERNAME"}, + }, + &cli.StringFlag{ + Name: "sftp.password", + Usage: "sftp password", + EnvVars: []string{"PLUGIN_PASSWORD", "SFTP_PASSWORD"}, + }, + &cli.StringFlag{ + Name: "sftp.public-key-file", + Usage: "sftp public key file path", + EnvVars: []string{"PLUGIN_PUBLIC_KEY_FILE", "SFTP_PUBLIC_KEY_FILE"}, + }, + &cli.StringFlag{ + Name: "sftp.auth-method", + Usage: "sftp auth method, defaults to none. (PASSWORD, PUBLIC_KEY_FILE)", + EnvVars: []string{"SFTP_AUTH_METHOD"}, + }, + &cli.StringFlag{ + Name: "sftp.host", + Usage: "sftp host", + EnvVars: []string{"SFTP_HOST"}, + }, + &cli.StringFlag{ + Name: "sftp.port", + Usage: "sftp port", + EnvVars: []string{"SFTP_PORT"}, }, } if err := app.Run(os.Args); err != nil { - stdlog.Fatalf("%+v", err) + stdlog.Fatalf("%#v", err) } } @@ -400,97 +452,97 @@ func run(c *cli.Context) error { logger := internal.NewLogger(logLevel, c.String("log.format"), "drone-cache") - plg := plugin.Plugin{ - Logger: logger, - Metadata: metadata.Metadata{ - Repo: metadata.Repo{ - Namespace: c.String("repo.namespace"), - Owner: c.String("repo.owner"), - Name: c.String("repo.name"), - Link: c.String("repo.link"), - Avatar: c.String("repo.avatar"), - Branch: c.String("repo.branch"), - Private: c.Bool("repo.private"), - Trusted: c.Bool("repo.trusted"), - }, - Build: metadata.Build{ - Number: c.Int("build.number"), - Event: c.String("build.event"), - Status: c.String("build.status"), - Deploy: c.String("build.deploy"), - Created: int64(c.Int("build.created")), - Started: int64(c.Int("build.started")), - Finished: int64(c.Int("build.finished")), - Link: c.String("build.link"), - }, - Commit: metadata.Commit{ - Remote: c.String("remote.url"), - Sha: c.String("commit.sha"), - Ref: c.String("commit.sha"), - Link: c.String("commit.link"), - Branch: c.String("commit.branch"), - Message: c.String("commit.message"), - Author: metadata.Author{ - Name: c.String("commit.author.name"), - Email: c.String("commit.author.email"), - Avatar: c.String("commit.author.avatar"), - }, + plg := plugin.New(log.With(logger, "component", "plugin")) + plg.Metadata = metadata.Metadata{ + Repo: metadata.Repo{ + Namespace: c.String("repo.namespace"), + Owner: c.String("repo.owner"), + Name: c.String("repo.name"), + Link: c.String("repo.link"), + Avatar: c.String("repo.avatar"), + Branch: c.String("repo.branch"), + Private: c.Bool("repo.private"), + Trusted: c.Bool("repo.trusted"), + }, + Build: metadata.Build{ + Number: c.Int("build.number"), + Event: c.String("build.event"), + Status: c.String("build.status"), + Deploy: c.String("build.deploy"), + Created: int64(c.Int("build.created")), + Started: int64(c.Int("build.started")), + Finished: int64(c.Int("build.finished")), + Link: c.String("build.link"), + }, + Commit: metadata.Commit{ + Remote: c.String("remote.url"), + Sha: c.String("commit.sha"), + Ref: 
c.String("commit.sha"), + Link: c.String("commit.link"), + Branch: c.String("commit.branch"), + Message: c.String("commit.message"), + Author: metadata.Author{ + Name: c.String("commit.author.name"), + Email: c.String("commit.author.email"), + Avatar: c.String("commit.author.avatar"), }, }, - Config: plugin.Config{ - ArchiveFormat: c.String("archive-format"), - Backend: c.String("backend"), - CacheKey: c.String("cache-key"), - CompressionLevel: c.Int("compression-level"), - Debug: c.Bool("debug"), - Mount: c.StringSlice("mount"), - Rebuild: c.Bool("rebuild"), - Restore: c.Bool("restore"), - - FileSystem: backend.FileSystemConfig{ - CacheRoot: c.String("filesystem-cache-root"), - }, - - S3: backend.S3Config{ - ACL: c.String("acl"), - Bucket: c.String("bucket"), - Encryption: c.String("encryption"), - Endpoint: c.String("endpoint"), - Key: c.String("access-key"), - PathStyle: c.Bool("path-style"), - Region: c.String("region"), - Secret: c.String("secret-key"), - }, - - Azure: backend.AzureConfig{ - AccountName: c.String("azure-account-name"), - AccountKey: c.String("azure-account-key"), - ContainerName: c.String("azure-container-name"), - BlobStorageURL: c.String("azure-blob-storage-url"), - Azurite: false, - }, - - SFTP: backend.SFTPConfig{ - CacheRoot: c.String("sftp-cache-root"), - Username: c.String("sftp-username"), - Host: c.String("sftp-host"), - Port: c.String("sftp-port"), - Auth: backend.SSHAuth{ - Password: c.String("sftp-password"), - PublicKeyFile: c.String("sftp-public-key-file"), - Method: backend.SSHAuthMethod(c.String("sftp-auth-method")), - }, - }, + } - CloudStorage: backend.CloudStorageConfig{ - Bucket: c.String("bucket"), - Encryption: c.String("encryption"), - Endpoint: c.String("endpoint"), - APIKey: c.String("secret-key"), + plg.Config = plugin.Config{ + ArchiveFormat: c.String("archive-format"), + Backend: c.String("backend"), + CacheKeyTemplate: c.String("cache-key"), + CompressionLevel: c.Int("compression-level"), + Debug: c.Bool("debug"), + Mount: c.StringSlice("mount"), + Rebuild: c.Bool("rebuild"), + Restore: c.Bool("restore"), + + StorageOperationTimeout: c.Duration("backend.operation-timeout"), + FileSystem: filesystem.Config{ + CacheRoot: c.String("filesystem-cache-root"), + }, + S3: s3.Config{ + ACL: c.String("acl"), + Bucket: c.String("bucket"), + Encryption: c.String("encryption"), + Endpoint: c.String("endpoint"), + Key: c.String("access-key"), + PathStyle: c.Bool("path-style"), + Region: c.String("region"), + Secret: c.String("secret-key"), + }, + Azure: azure.Config{ + AccountName: c.String("azure.account-name"), + AccountKey: c.String("azure.account-key"), + ContainerName: c.String("azure.container-name"), + BlobStorageURL: c.String("azure.blob-storage-url"), + Azurite: false, + Timeout: c.Duration("backend.operation-timeout"), + }, + SFTP: sftp.Config{ + CacheRoot: c.String("sftp.cache-root"), + Username: c.String("sftp.username"), + Host: c.String("sftp.host"), + Port: c.String("sftp.port"), + Auth: sftp.SSHAuth{ + Password: c.String("sftp.password"), + PublicKeyFile: c.String("sftp.public-key-file"), + Method: sftp.SSHAuthMethod(c.String("sftp.auth-method")), }, - - SkipSymlinks: c.Bool("skip-symlinks"), + Timeout: c.Duration("backend.operation-timeout"), }, + GCS: gcs.Config{ + Bucket: c.String("bucket"), + Endpoint: c.String("endpoint"), + APIKey: c.String("gcs.api-key"), + JSONKey: c.String("gcsjson-key"), + Encryption: c.String("gcs.encryption-key"), + Timeout: c.Duration("backend.operation-timeout"), + }, + + SkipSymlinks: 
c.Bool("skip-symlinks"), } err := plg.Exec() @@ -501,6 +553,7 @@ func run(c *cli.Context) error { if c.Bool("exit-code") { // If it is exit-code enabled, always exit with error. level.Warn(logger).Log("msg", "silent fails disabled, exiting with status code on error") + return err } diff --git a/plugin/plugin.go b/plugin/plugin.go deleted file mode 100644 index 391452ca..00000000 --- a/plugin/plugin.go +++ /dev/null @@ -1,208 +0,0 @@ -// Package plugin for caching directories using given backends -package plugin - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "time" - - "github.com/meltwater/drone-cache/cache" - "github.com/meltwater/drone-cache/cache/backend" - "github.com/meltwater/drone-cache/metadata" - "github.com/meltwater/drone-cache/plugin/cachekey" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" -) - -type ( - // Config plugin-specific parameters and secrets. - Config struct { - ArchiveFormat string - Backend string - CacheKey string - - CompressionLevel int - - Debug bool - SkipSymlinks bool - Rebuild bool - Restore bool - - Mount []string - - S3 backend.S3Config - FileSystem backend.FileSystemConfig - SFTP backend.SFTPConfig - Azure backend.AzureConfig - CloudStorage backend.CloudStorageConfig - } - - // Plugin stores metadata about current plugin. - Plugin struct { - Logger log.Logger - Metadata metadata.Metadata - Config Config - } - - // Error recognized error from plugin. - Error string -) - -func (e Error) Error() string { return string(e) } - -// Exec entry point of Plugin, where the magic happens. -func (p *Plugin) Exec() error { - c := p.Config - - // 1. Check parameters - if c.Debug { - level.Debug(p.Logger).Log("msg", "DEBUG MODE enabled!") - - for _, pair := range os.Environ() { - level.Debug(p.Logger).Log("var", pair) - } - - level.Debug(p.Logger).Log("msg", "plugin initialized with config", "config", fmt.Sprintf("%+v", p.Config)) - level.Debug(p.Logger).Log("msg", "plugin initialized with metadata", "metadata", fmt.Sprintf("%+v", p.Metadata)) - } - - if c.Rebuild && c.Restore { - return errors.New("rebuild and restore are mutually exclusive, please set only one of them") - } - - _, err := cachekey.ParseTemplate(c.CacheKey) - if err != nil { - return fmt.Errorf("parse, <%s> as cache key template, falling back to default %w", c.CacheKey, err) - } - - // 2. Initialize backend - backend, err := initializeBackend(p.Logger, c) - if err != nil { - return fmt.Errorf("initialize, <%s> as backend %w", c.Backend, err) - } - - // 3. Initialize cache - cch := cache.New(p.Logger, backend, - cache.WithArchiveFormat(c.ArchiveFormat), - cache.WithSkipSymlinks(c.SkipSymlinks), - cache.WithCompressionLevel(c.CompressionLevel), - ) - - // 4. 
Select mode - if c.Rebuild { - if err := processRebuild(p.Logger, cch, p.Config.CacheKey, p.Config.Mount, p.Metadata); err != nil { - return Error(fmt.Sprintf("[WARNING] build cache, process rebuild failed, %v\n", err)) - } - } - - if c.Restore { - if err := processRestore(p.Logger, cch, p.Config.CacheKey, p.Config.Mount, p.Metadata); err != nil { - return Error(fmt.Sprintf("[WARNING] restore cache, process restore failed, %v\n", err)) - } - } - - return nil -} - -// initializeBackend initializes backend using given configuration -func initializeBackend(logger log.Logger, c Config) (cache.Backend, error) { - switch c.Backend { - case "azure": - level.Warn(logger).Log("msg", "using azure blob as backend") - return backend.InitializeAzureBackend(logger, c.Azure, c.Debug) - case "s3": - level.Warn(logger).Log("msg", "using aws s3 as backend") - return backend.InitializeS3Backend(logger, c.S3, c.Debug) - case "cloudstorage": - level.Warn(logger).Log("msg", "using gc storage as backend") - return backend.InitializeGCSBackend(logger, c.CloudStorage, c.Debug) - case "filesystem": - level.Warn(logger).Log("msg", "using filesystem as backend") - return backend.InitializeFileSystemBackend(logger, c.FileSystem, c.Debug) - case "sftp": - level.Warn(logger).Log("msg", "using sftp as backend") - return backend.InitializeSFTPBackend(logger, c.SFTP, c.Debug) - default: - return nil, errors.New("unknown backend") - } -} - -// processRebuild the remote cache from the local environment -func processRebuild(l log.Logger, c cache.Cache, cacheKeyTmpl string, mountedDirs []string, m metadata.Metadata) error { - now := time.Now() - branch := m.Commit.Branch - - for _, mount := range mountedDirs { - if _, err := os.Stat(mount); err != nil { - return fmt.Errorf("mount <%s>, make sure file or directory exists and readable %w", mount, err) - } - - key, err := cacheKey(l, m, cacheKeyTmpl, mount, branch) - if err != nil { - return fmt.Errorf("generate cache key %w", err) - } - - path := filepath.Join(m.Repo.Name, key) - - level.Info(l).Log("msg", "rebuilding cache for directory", "local", mount, "remote", path) - - if err := c.Push(mount, path); err != nil { - return fmt.Errorf("upload %w", err) - } - } - - level.Info(l).Log("msg", "cache built", "took", time.Since(now)) - - return nil -} - -// processRestore the local environment from the remote cache -func processRestore(l log.Logger, c cache.Cache, cacheKeyTmpl string, mountedDirs []string, m metadata.Metadata) error { - now := time.Now() - branch := m.Commit.Branch - - for _, mount := range mountedDirs { - key, err := cacheKey(l, m, cacheKeyTmpl, mount, branch) - if err != nil { - return fmt.Errorf("generate cache key %w", err) - } - - path := filepath.Join(m.Repo.Name, key) - level.Info(l).Log("msg", "restoring directory", "local", mount, "remote", path) - - if err := c.Pull(path, mount); err != nil { - return fmt.Errorf("download %w", err) - } - } - - level.Info(l).Log("msg", "cache restored", "took", time.Since(now)) - - return nil -} - -// Helpers - -// cacheKey generates key from given template as parameter or fallbacks hash -func cacheKey(l log.Logger, p metadata.Metadata, cacheKeyTmpl, mount, branch string) (string, error) { - level.Info(l).Log("msg", "using provided cache key template") - - key, err := cachekey.Generate(cacheKeyTmpl, mount, metadata.Metadata{ - Build: p.Build, - Commit: p.Commit, - Repo: p.Repo, - }) - - if err != nil { - level.Error(l).Log("msg", "falling back to default key", "err", err) - key, err = cachekey.Hash(mount, branch) - - if 
err != nil { - return "", fmt.Errorf("generate hash key for mounted %w", err) - } - } - - return key, nil -} diff --git a/plugin/plugin_test.go b/plugin/plugin_test.go deleted file mode 100644 index 90b1eff2..00000000 --- a/plugin/plugin_test.go +++ /dev/null @@ -1,598 +0,0 @@ -package plugin - -import ( - "fmt" - "os" - "path/filepath" - "testing" - - "github.com/meltwater/drone-cache/cache" - "github.com/meltwater/drone-cache/cache/backend" - "github.com/meltwater/drone-cache/metadata" - - "github.com/go-kit/kit/log" - "github.com/minio/minio-go" -) - -const ( - defaultEndpoint = "127.0.0.1:9000" - defaultAccessKey = "AKIAIOSFODNN7EXAMPLE" - defaultSecretAccessKey = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" - bucket = "meltwater-drone-test" - region = "eu-west-1" - useSSL = false -) - -var ( - endpoint = getEnv("TEST_ENDPOINT", defaultEndpoint) - accessKey = getEnv("TEST_ACCESS_KEY", defaultAccessKey) - secretAccessKey = getEnv("TEST_SECRET_KEY", defaultSecretAccessKey) -) - -func TestRebuild(t *testing.T) { - setup(t) - defer cleanUp(t) - - dirPath := "./tmp/1" - if mkErr1 := os.MkdirAll(dirPath, 0755); mkErr1 != nil { - t.Fatal(mkErr1) - } - - fPath := "./tmp/1/file_to_cache.txt" - file, fErr := os.Create(fPath) - if fErr != nil { - t.Fatal(fErr) - } - - if _, err := file.WriteString("some content\n"); err != nil { - t.Fatal(err) - } - file.Sync() - file.Close() - - absPath, err := filepath.Abs(fPath) - if err != nil { - t.Fatal(err) - } - - linkAbsPath, err := filepath.Abs("./tmp/1/symlink_to_cache.txt") - if err != nil { - t.Fatal(err) - } - - if err := os.Symlink(absPath, linkAbsPath); err != nil { - t.Fatal(err) - } - - plugin := newTestPlugin("s3", true, false, []string{dirPath}, "", "") - - if err := plugin.Exec(); err != nil { - t.Errorf("plugin exec failed, error: %v\n", err) - } -} - -func TestRebuildSkipSymlinks(t *testing.T) { - setup(t) - defer cleanUp(t) - - dirPath := "./tmp/1" - if mkErr1 := os.MkdirAll(dirPath, 0755); mkErr1 != nil { - t.Fatal(mkErr1) - } - - fPath := "./tmp/1/file_to_cache.txt" - file, fErr := os.Create(fPath) - if fErr != nil { - t.Fatal(fErr) - } - - if _, err := file.WriteString("some content\n"); err != nil { - t.Fatal(err) - } - file.Sync() - file.Close() - - absPath, err := filepath.Abs(fPath) - if err != nil { - t.Fatal(err) - } - - linkAbsPath, err := filepath.Abs("./tmp/1/symlink_to_cache.txt") - if err != nil { - t.Fatal(err) - } - - if err := os.Symlink(absPath, linkAbsPath); err != nil { - t.Fatal(err) - } - - plugin := newTestPlugin("s3", true, false, []string{"./tmp/1"}, "", "") - plugin.Config.SkipSymlinks = true - - if err := plugin.Exec(); err != nil { - t.Errorf("plugin exec failed, error: %v\n", err) - } -} - -func TestRebuildWithCacheKey(t *testing.T) { - setup(t) - defer cleanUp(t) - - if mkErr1 := os.MkdirAll("./tmp/1", 0755); mkErr1 != nil { - t.Fatal(mkErr1) - } - - file, fErr := os.Create("./tmp/1/file_to_cache.txt") - if fErr != nil { - t.Fatal(fErr) - } - - if _, err := file.WriteString("some content\n"); err != nil { - t.Fatal(err) - } - file.Sync() - file.Close() - - plugin := newTestPlugin("s3", true, false, []string{"./tmp/1"}, "{{ .Repo.Name }}_{{ .Commit.Branch }}_{{ .Build.Number }}", "") - - if err := plugin.Exec(); err != nil { - t.Errorf("plugin exec failed, error: %v\n", err) - } -} - -func TestRebuildWithGzip(t *testing.T) { - setup(t) - defer cleanUp(t) - - if mkErr1 := os.MkdirAll("./tmp/1", 0755); mkErr1 != nil { - t.Fatal(mkErr1) - } - - file, fErr := os.Create("./tmp/1/file_to_cache.txt") - if fErr != nil { 
- t.Fatal(fErr) - } - - if _, err := file.WriteString("some content\n"); err != nil { - t.Fatal(err) - } - file.Sync() - file.Close() - - plugin := newTestPlugin("s3", true, false, []string{"./tmp/1"}, "", "gzip") - - if err := plugin.Exec(); err != nil { - t.Errorf("plugin exec failed, error: %v\n", err) - } -} - -func TestRebuildWithFilesystem(t *testing.T) { - setup(t) - defer cleanUp(t) - - if mkErr1 := os.MkdirAll("./tmp/1", 0755); mkErr1 != nil { - t.Fatal(mkErr1) - } - - file, fErr := os.Create("./tmp/1/file_to_cache.txt") - if fErr != nil { - t.Fatal(fErr) - } - - if _, err := file.WriteString("some content\n"); err != nil { - t.Fatal(err) - } - file.Sync() - file.Close() - - plugin := newTestPlugin("filesystem", true, false, []string{"./tmp/1"}, "", "gzip") - - if err := plugin.Exec(); err != nil { - t.Errorf("plugin exec failed, error: %v\n", err) - } -} - -func TestRebuildNonExisting(t *testing.T) { - setup(t) - defer cleanUp(t) - - plugin := newTestPlugin("s3", true, false, []string{"./nonexisting/path"}, "", "") - - if err := plugin.Exec(); err == nil { - t.Error("plugin exec did not fail as expected, error: ") - } -} - -func TestRestore(t *testing.T) { - setup(t) - defer cleanUp(t) - - dirPath := "./tmp/1" - if err := os.MkdirAll(dirPath, 0755); err != nil { - t.Fatal(err) - } - - if err := os.MkdirAll("./tmp/2", 0755); err != nil { - t.Fatal(err) - } - - fPath := "./tmp/1/file_to_cache.txt" - file, cErr := os.Create(fPath) - if cErr != nil { - t.Fatal(cErr) - } - - if _, err := file.WriteString("some content\n"); err != nil { - t.Fatal(err) - } - - file.Sync() - file.Close() - - file1, fErr1 := os.Create("./tmp/1/file1_to_cache.txt") - if fErr1 != nil { - t.Fatal(fErr1) - } - - if _, err := file1.WriteString("some content\n"); err != nil { - t.Fatal(err) - } - - file1.Sync() - file1.Close() - - absPath, err := filepath.Abs(fPath) - if err != nil { - t.Fatal(err) - } - - linkAbsPath, err := filepath.Abs("./tmp/1/symlink_to_cache.txt") - if err != nil { - t.Fatal(err) - } - - if err := os.Symlink(absPath, linkAbsPath); err != nil { - t.Fatal(err) - } - - plugin := newTestPlugin("s3", true, false, []string{dirPath}, "", "") - - if err := plugin.Exec(); err != nil { - t.Errorf("plugin (rebuild mode) exec failed, error: %v\n", err) - } - - if err := os.RemoveAll("./tmp"); err != nil { - t.Fatal(err) - } - - plugin.Config.Rebuild = false - plugin.Config.Restore = true - if err := plugin.Exec(); err != nil { - t.Errorf("plugin (restore mode) exec failed, error: %v\n", err) - } - - if _, err := os.Stat("./tmp/1/file_to_cache.txt"); os.IsNotExist(err) { - t.Error(err) - } - - if _, err := os.Stat("./tmp/1/file1_to_cache.txt"); os.IsNotExist(err) { - t.Error(err) - } - - target, err := os.Readlink("./tmp/1/symlink_to_cache.txt") - if err != nil { - t.Error(err) - } - - if _, err := os.Stat(target); os.IsNotExist(err) { - t.Error(err) - } -} - -func TestRestoreWithCacheKey(t *testing.T) { - setup(t) - defer cleanUp(t) - - if err := os.MkdirAll("./tmp/1", 0755); err != nil { - t.Fatal(err) - } - - file, cErr := os.Create("./tmp/1/file_to_cache.txt") - if cErr != nil { - t.Fatal(cErr) - } - - if _, err := file.WriteString("some content\n"); err != nil { - t.Fatal(err) - } - - file.Sync() - file.Close() - - if mkErr1 := os.MkdirAll("./tmp/1", 0755); mkErr1 != nil { - t.Fatal(mkErr1) - } - - file1, fErr1 := os.Create("./tmp/1/file1_to_cache.txt") - if fErr1 != nil { - t.Fatal(fErr1) - } - - if _, err := file1.WriteString("some content\n"); err != nil { - t.Fatal(err) - } - - file1.Sync() - 
file1.Close() - - plugin := newTestPlugin("s3", true, false, []string{"./tmp/1"}, "{{ .Repo.Name }}_{{ .Commit.Branch }}_{{ .Build.Number }}", "") - - if err := plugin.Exec(); err != nil { - t.Errorf("plugin (rebuild mode) exec failed, error: %v\n", err) - } - - if err := os.RemoveAll("./tmp"); err != nil { - t.Fatal(err) - } - - plugin.Config.Rebuild = false - plugin.Config.Restore = true - if err := plugin.Exec(); err != nil { - t.Errorf("plugin (restore mode) exec failed, error: %v\n", err) - } - - if _, err := os.Stat("./tmp/1/file_to_cache.txt"); os.IsNotExist(err) { - t.Error(err) - } - - if _, err := os.Stat("./tmp/1/file1_to_cache.txt"); os.IsNotExist(err) { - t.Error(err) - } -} - -func TestRestoreWithGzip(t *testing.T) { - setup(t) - defer cleanUp(t) - - if err := os.MkdirAll("./tmp/1", 0755); err != nil { - t.Fatal(err) - } - - file, cErr := os.Create("./tmp/1/file_to_cache.txt") - if cErr != nil { - t.Fatal(cErr) - } - - if _, err := file.WriteString("some content\n"); err != nil { - t.Fatal(err) - } - - file.Sync() - file.Close() - - if mkErr1 := os.MkdirAll("./tmp/1", 0755); mkErr1 != nil { - t.Fatal(mkErr1) - } - - file1, fErr1 := os.Create("./tmp/1/file1_to_cache.txt") - if fErr1 != nil { - t.Fatal(fErr1) - } - - if _, err := file1.WriteString("some content\n"); err != nil { - t.Fatal(err) - } - - file1.Sync() - file1.Close() - - plugin := newTestPlugin("s3", true, false, []string{"./tmp/1"}, "", "gzip") - - if err := plugin.Exec(); err != nil { - t.Errorf("plugin (rebuild mode) exec failed, error: %v\n", err) - } - - if err := os.RemoveAll("./tmp"); err != nil { - t.Fatal(err) - } - - plugin.Config.Rebuild = false - plugin.Config.Restore = true - if err := plugin.Exec(); err != nil { - t.Errorf("plugin (restore mode) exec failed, error: %v\n", err) - } - - if _, err := os.Stat("./tmp/1/file_to_cache.txt"); os.IsNotExist(err) { - t.Error(err) - } - - if _, err := os.Stat("./tmp/1/file1_to_cache.txt"); os.IsNotExist(err) { - t.Error(err) - } -} - -func TestRestoreWithFilesystem(t *testing.T) { - setup(t) - defer cleanUp(t) - - if err := os.MkdirAll("./tmp/1", 0755); err != nil { - t.Fatal(err) - } - - file, cErr := os.Create("./tmp/1/file_to_cache.txt") - if cErr != nil { - t.Fatal(cErr) - } - - if _, err := file.WriteString("some content\n"); err != nil { - t.Fatal(err) - } - - file.Sync() - file.Close() - - if mkErr1 := os.MkdirAll("./tmp/1", 0755); mkErr1 != nil { - t.Fatal(mkErr1) - } - - file1, fErr1 := os.Create("./tmp/1/file1_to_cache.txt") - if fErr1 != nil { - t.Fatal(fErr1) - } - - if _, err := file1.WriteString("some content\n"); err != nil { - t.Fatal(err) - } - - file1.Sync() - file1.Close() - - plugin := newTestPlugin("filesystem", true, false, []string{"./tmp/1"}, "", "gzip") - - if err := plugin.Exec(); err != nil { - t.Errorf("plugin (rebuild mode) exec failed, error: %v\n", err) - } - - if err := os.RemoveAll("./tmp"); err != nil { - t.Fatal(err) - } - - plugin.Config.Rebuild = false - plugin.Config.Restore = true - if err := plugin.Exec(); err != nil { - t.Errorf("plugin (restore mode) exec failed, error: %v\n", err) - } - - if _, err := os.Stat("./tmp/1/file_to_cache.txt"); os.IsNotExist(err) { - t.Error(err) - } - - if _, err := os.Stat("./tmp/1/file1_to_cache.txt"); os.IsNotExist(err) { - t.Error(err) - } -} - -// Helpers - -func newTestPlugin(bck string, rebuild, restore bool, mount []string, cacheKey, archiveFmt string) Plugin { - return Plugin{ - Logger: log.NewNopLogger(), - Metadata: metadata.Metadata{ - Repo: metadata.Repo{ - Branch: "master", - 
Name: "drone-cache", - }, - Commit: metadata.Commit{ - Branch: "master", - }, - }, - Config: Config{ - ArchiveFormat: archiveFmt, - CompressionLevel: cache.DefaultCompressionLevel, - Backend: bck, - CacheKey: cacheKey, - Mount: mount, - Rebuild: rebuild, - Restore: restore, - - FileSystem: backend.FileSystemConfig{ - CacheRoot: "../testcache/cache", - }, - - S3: backend.S3Config{ - ACL: "private", - Bucket: bucket, - Encryption: "", - Endpoint: endpoint, - Key: accessKey, - PathStyle: true, // Should be true for minio and false for AWS. - Region: region, - Secret: secretAccessKey, - }, - }, - } -} - -func newMinioClient() (*minio.Client, error) { - minioClient, err := minio.New(endpoint, accessKey, secretAccessKey, useSSL) - if err != nil { - return nil, err - } - return minioClient, nil -} - -func setup(t *testing.T) { - minioClient, err := newMinioClient() - if err != nil { - t.Fatal(err) - } - - if err = minioClient.MakeBucket(bucket, region); err != nil { - t.Fatal(err) - } -} - -func cleanUp(t *testing.T) { - if err := os.RemoveAll("./tmp"); err != nil { - t.Fatal(err) - } - - minioClient, err := newMinioClient() - if err != nil { - t.Fatal(err) - } - - if err = removeAllObjects(minioClient, bucket); err != nil { - t.Fatal(err) - } - - if err = minioClient.RemoveBucket(bucket); err != nil { - t.Fatal(err) - } -} - -func removeAllObjects(minioClient *minio.Client, bucketName string) error { - objects := make(chan string) - errors := make(chan error) - - go func() { - defer close(objects) - defer close(errors) - - for object := range minioClient.ListObjects(bucketName, "", true, nil) { - if object.Err != nil { - errors <- object.Err - } - objects <- object.Key - } - }() - - for { - select { - case object, open := <-objects: - if !open { - return nil - } - if err := minioClient.RemoveObject(bucketName, object); err != nil { - return fmt.Errorf("remove all objects failed, %v", err) - } - case err, open := <-errors: - if !open { - return nil - } - if err != nil { - return fmt.Errorf("remove all objects failed, while fetching %v", err) - } - - return nil - } - } -} - -func getEnv(key, defaultVal string) string { - value, ok := os.LookupEnv(key) - if !ok { - return defaultVal - } - return value -} diff --git a/test/assert.go b/test/assert.go new file mode 100644 index 00000000..baade134 --- /dev/null +++ b/test/assert.go @@ -0,0 +1,224 @@ +//nolint:gomnd +package test + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "sort" + "testing" + + "github.com/google/go-cmp/cmp" +) + +// Assert is modified version of https://github.com/benbjohnson/testing. + +// Assert fails the test if the condition is false. +func Assert(tb testing.TB, condition bool, msg string, v ...interface{}) { + tb.Helper() + + if !condition { + _, file, line, _ := runtime.Caller(1) + tb.Fatalf("%s:%d: "+msg+"\n", append([]interface{}{filepath.Base(file), line}, v...)...) + } +} + +// Ok fails the test if an err is not nil. +func Ok(tb testing.TB, err error) { + tb.Helper() + + if err != nil { + _, file, line, _ := runtime.Caller(1) + tb.Fatalf("%s:%d: unexpected error: %s\n", filepath.Base(file), line, err.Error()) + } +} + +// NotOk fails the test if an err is nil. +func NotOk(tb testing.TB, err error) { + tb.Helper() + + if err == nil { + _, file, line, _ := runtime.Caller(1) + tb.Fatalf("%s:%d: expected error, got nothing\n", filepath.Base(file), line) + } +} + +// Expected fails if the errors does not match. 
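+// A minimal usage sketch (the backend value and the sentinel error are
+// hypothetical, named only for illustration):
+//
+//	_, err := b.Get("missing-key")
+//	test.Expected(t, err, ErrKeyNotFound)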
+func Expected(tb testing.TB, got, want error) {
+	tb.Helper()
+
+	NotOk(tb, got)
+
+	if errors.Is(got, want) {
+		return
+	}
+
+	_, file, line, _ := runtime.Caller(1)
+	tb.Fatalf("%s:%d: got unexpected error: %v\n", filepath.Base(file), line, got.Error())
+}
+
+// Exists fails if the file or directory in the given path does not exist.
+func Exists(tb testing.TB, path string) {
+	tb.Helper()
+
+	_, err := os.Lstat(path)
+	if err != nil {
+		_, file, line, _ := runtime.Caller(1)
+
+		if os.IsNotExist(err) {
+			tb.Fatalf("%s:%d: should exist: %s\n", filepath.Base(file), line, err.Error())
+		}
+	}
+}
+
+// Equals fails the test if want is not equal to got.
+func Equals(tb testing.TB, want, got interface{}, v ...interface{}) {
+	tb.Helper()
+
+	if diff := cmp.Diff(want, got); diff != "" {
+		_, file, line, _ := runtime.Caller(1)
+
+		var msg string
+
+		if len(v) > 0 {
+			msg = fmt.Sprintf(v[0].(string), v[1:]...)
+		}
+
+		tb.Fatalf("%s:%d:"+msg+"\n\n\t (-want +got):\n%s", filepath.Base(file), line, diff)
+	}
+}
+
+//nolint:funlen // EqualDirs fails if the contents of given directories are not the same.
+func EqualDirs(tb testing.TB, dst string, src string, srcs []string) {
+	tb.Helper()
+
+	srcList := []string{}
+
+	for _, s := range srcs {
+		if isDir(s) {
+			paths, err := expand(s)
+			if err != nil {
+				tb.Fatalf("expand %s: %v\n", s, err)
+			}
+
+			srcList = append(srcList, paths...)
+
+			continue
+		}
+
+		srcList = append(srcList, s)
+	}
+
+	dstList, err := expand(dst)
+	if err != nil {
+		tb.Fatalf("expand %s: %v\n", dst, err)
+	}
+
+	sort.Strings(srcList)
+	sort.Strings(dstList)
+
+	relSrcList, err := relative(src, srcList)
+	if err != nil {
+		tb.Fatalf("relative %s: %v\n", src, err)
+	}
+
+	relDstList, err := relative(dst, dstList)
+	if err != nil {
+		tb.Fatalf("relative %s: %v\n", dst, err)
+	}
+
+	Equals(tb, relSrcList, relDstList)
+
+	_, file, line, _ := runtime.Caller(1)
+
+	for i := 0; i < len(srcList); i++ {
+		src := srcList[i]
+		dst := dstList[i]
+
+		if isSymlink(src) && isSymlink(dst) {
+			src, err = os.Readlink(src)
+			if err != nil {
+				tb.Fatalf("%s:%d: unexpected error, src path, link <%s>: %s\n",
+					filepath.Base(file), line, src, err.Error())
+			}
+
+			dst, err = os.Readlink(dst)
+			if err != nil {
+				tb.Fatalf("%s:%d: unexpected error, dst path, link <%s>: %s\n",
+					filepath.Base(file), line, dst, err.Error())
+			}
+		}
+
+		wContent, err := ioutil.ReadFile(src)
+		if err != nil {
+			tb.Fatalf("%s:%d: unexpected error, src path <%s>: %s\n", filepath.Base(file), line, srcList[i], err.Error())
+		}
+
+		gContent, err := ioutil.ReadFile(dst)
+		if err != nil {
+			tb.Fatalf("%s:%d: unexpected error, dst path <%s>: %s\n",
+				filepath.Base(file), line, dstList[i], err.Error())
+		}
+
+		Equals(tb, wContent, gContent)
+	}
+}
+
+// Helpers
+
+func isDir(path string) bool {
+	fi, err := os.Stat(path)
+	if err != nil {
+		return false
+	}
+
+	return fi.IsDir()
+}
+
+func isSymlink(path string) bool {
+	fi, err := os.Lstat(path)
+	return err == nil && fi.Mode()&os.ModeSymlink != 0
+}
+
+func expand(src string) ([]string, error) {
+	paths := []string{}
+
+	if err := filepath.Walk(src, func(path string, fi os.FileInfo, err error) error {
+		if err != nil {
+			return fmt.Errorf("walk %q: %v", path, err)
+		}
+
+		if fi.IsDir() {
+			return nil
+		}
+
+		paths = append(paths, path)
+
+		return nil
+	}); err != nil {
+		return nil, fmt.Errorf("walking the path %q: %v", src, err)
+	}
+
+	return paths, nil
+}
+
+func relative(top string, paths []string) ([]string, error) {
+	// NOTE: length-zero make with capacity, so append does not leave
+	// len(paths) empty strings at the front of the result.
+	result := make([]string, 0, len(paths))
+
+	for _, p := range
paths { + name := filepath.Base(p) + + rel, err := filepath.Rel(top, filepath.Dir(p)) + if err != nil { + return []string{}, fmt.Errorf("relative path %q: %q %v", p, rel, err) + } + + name = filepath.Join(filepath.ToSlash(rel), name) + result = append(result, name) + } + + return result, nil +} diff --git a/test/helpers.go b/test/helpers.go new file mode 100644 index 00000000..3b218220 --- /dev/null +++ b/test/helpers.go @@ -0,0 +1,81 @@ +package test + +import ( + "io/ioutil" + "os" + "testing" +) + +// CreateTempFile TODO +func CreateTempFile(t testing.TB, name string, content []byte, in ...string) (string, func()) { + t.Helper() + + parent := "" + if len(in) > 0 { + parent = in[0] + } + + tmpfile, err := ioutil.TempFile(parent, name+"_*.testfile") + if err != nil { + t.Fatalf("unexpectedly failed creating the temp file: %v", err) + } + + if _, err := tmpfile.Write(content); err != nil { + t.Fatalf("unexpectedly failed writing to the temp file: %v", err) + } + + if err := tmpfile.Close(); err != nil { + t.Fatalf("unexpectedly failed closing the temp file: %v", err) + } + + return tmpfile.Name(), func() { os.Remove(tmpfile.Name()) } +} + +// CreateTempFilesInDir TODO +func CreateTempFilesInDir(t testing.TB, name string, content []byte, in ...string) (string, func()) { + t.Helper() + + parent := "" + if len(in) > 0 { + parent = in[0] + } + + tmpDir, err := ioutil.TempDir(parent, name+"-testdir-*") + if err != nil { + t.Fatalf("unexpectedly failed creating the temp dir: %v", err) + } + + for i := 0; i < 3; i++ { + tmpfile, err := ioutil.TempFile(tmpDir, name+"_*.testfile") + if err != nil { + t.Fatalf("unexpectedly failed creating the temp file: %v", err) + } + + if _, err := tmpfile.Write(content); err != nil { + t.Fatalf("unexpectedly failed writing to the temp file: %v", err) + } + + if err := tmpfile.Close(); err != nil { + t.Fatalf("unexpectedly failed closing the temp file: %v", err) + } + } + + return tmpDir, func() { os.RemoveAll(tmpDir) } +} + +// CreateTempDir TODO +func CreateTempDir(t testing.TB, name string, in ...string) (string, func()) { + t.Helper() + + parent := "" + if len(in) > 0 { + parent = in[0] + } + + tmpDir, err := ioutil.TempDir(parent, name+"-testdir-*") + if err != nil { + t.Fatalf("unexpectedly failed creating the temp dir: %v", err) + } + + return tmpDir, func() { os.RemoveAll(tmpDir) } +} From ec47cee2f2bcfea75ecb456b78fd10efa5b07a3b Mon Sep 17 00:00:00 2001 From: Kemal Akkoyun Date: Thu, 2 Apr 2020 19:18:32 +0200 Subject: [PATCH 07/16] Refactor CI --- .drone.yml | 73 ++++++++++++++++++++++------------------------ .golangci.yml | 5 ++++ docker-compose.yml | 15 ++++++---- 3 files changed, 50 insertions(+), 43 deletions(-) diff --git a/.drone.yml b/.drone.yml index 9a895aa1..ecf108b8 100644 --- a/.drone.yml +++ b/.drone.yml @@ -19,35 +19,39 @@ steps: image: minio/mc:RELEASE.2018-09-26T00-42-43Z commands: - sleep 5 - - mc config host add minio http://filestorage:9000 AKIAIOSFODNN7EXAMPLE wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY + - mc config host add minio http://minio:9000 AKIAIOSFODNN7EXAMPLE wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY - mc mb --region=eu-west-1 minio/drone-cache-bucket - name: build - image: golang:1.13-alpine + image: golang:1.14-alpine commands: - apk add --update make git - make drone-cache + environment: + CGO_ENABLED: 0 -- name: test - image: golang:1.13-alpine +- name: lint + image: golang:1.14-alpine commands: - - go test -v -mod=vendor -cover ./... 
+ - apk add --update make git curl + - make lint environment: CGO_ENABLED: 0 - TEST_ENDPOINT: filestorage:9000 - TEST_SFTP_HOST: sftp - TEST_AZURITE_URL: azurite:10000 - volumes: - - name: testcache - path: /drone/src/testcache/cache -- name: lint - image: golang:1.13-alpine +- name: test + image: golang:1.14-alpine commands: - - "wget -O - -q https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.21.0" - - ./bin/golangci-lint run -v --enable-all -D gochecknoglobals + - go test -mod=vendor -short -cover -tags=integration ./... environment: CGO_ENABLED: 0 + TEST_S3_ENDPOINT: minio:9000 + TEST_GCS_ENDPOINT: http://fakegcs:4443/storage/v1/ + TEST_STORAGE_EMULATOR_HOST: fakegcs:4443 + TEST_SFTP_HOST: sftp + TEST_AZURITE_URL: azurite:10000 + volumes: + - name: testdata + path: /drone/src/tmp/testdata/cache - name: release-snapshot-dev image: goreleaser/goreleaser:v0.120 @@ -90,7 +94,7 @@ steps: rebuild: true region: eu-west-1 path_style: true - endpoint: filestorage:9000 + endpoint: minio:9000 exit_code: true environment: AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE @@ -107,7 +111,7 @@ steps: rebuild: true region: eu-west-1 path_style: true - endpoint: filestorage:9000 + endpoint: minio:9000 exit_code: true environment: AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE @@ -125,7 +129,7 @@ steps: rebuild: true region: eu-west-1 path_style: true - endpoint: filestorage:9000 + endpoint: minio:9000 exit_code: true environment: AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE @@ -199,7 +203,7 @@ steps: pull: always restore: true path_style: true - endpoint: filestorage:9000 + endpoint: minio:9000 exit_code: true environment: AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE @@ -217,7 +221,7 @@ steps: region: eu-west-1 restore: true path_style: true - endpoint: filestorage:9000 + endpoint: minio:9000 exit_code: true environment: AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE @@ -256,34 +260,21 @@ steps: region: eu-west-1 restore: true path_style: true - endpoint: filestorage:9000 + endpoint: minio:9000 exit_code: true environment: AWS_ACCESS_KEY_ID: AKIAIOSFODNN7EXAMPLE AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY - name: build-after - image: golang:1.13-alpine + image: golang:1.14-alpine commands: - apk add --update make git - make drone-cache -- name: test-after - image: golang:1.13-alpine - commands: - - go test -v -mod=vendor -cover ./... 
- environment: - CGO_ENABLED: 0 - TEST_ENDPOINT: filestorage:9000 - TEST_SFTP_HOST: sftp - TEST_AZURITE_URL: azurite:10000 - volumes: - - name: testcache - path: /drone/src/testcache/cache - services: -- name: filestorage - image: minio/minio:RELEASE.2018-10-06T00-15-16Z +- name: minio + image: minio/minio:RELEASE.2020-03-05T01-04-19Z commands: - minio server /data environment: @@ -292,6 +283,12 @@ services: MINIO_SECRET_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY ports: - 9000 +- name: fakegcs + image: fsouza/fake-gcs-server + ports: + - 4443 + commands: + - fake-gcs-server -public-host fakegcs -scheme http - name: sftp image: atmoz/sftp ports: @@ -308,7 +305,7 @@ services: volumes: - name: cache temp: {} -- name: testcache +- name: testdata temp: {} trigger: diff --git a/.golangci.yml b/.golangci.yml index f36f3c8b..457dc1e0 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -5,6 +5,7 @@ run: deadline: 1m tests: false modules-download-mode: vendor + timeout: 5m linters: enable-all: true @@ -16,3 +17,7 @@ linters-settings: exclude: .errcheck_excludes lll: line-length: 120 + funlen: + lines: 80 + statements: 45 + diff --git a/docker-compose.yml b/docker-compose.yml index f5095133..91b2df02 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,7 +1,7 @@ version: '3' services: - filestorage: - image: minio/minio:RELEASE.2018-10-06T00-15-16Z + minio: + image: minio/minio:RELEASE.2020-03-05T01-04-19Z environment: MINIO_ACCESS_KEY: AKIAIOSFODNN7EXAMPLE MINIO_SECRET_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY @@ -9,6 +9,11 @@ services: ports: - 9000:9000 command: server /data + fake-gcs: + image: fsouza/fake-gcs-server + ports: + - 4443:4443 + command: -public-host localhost -scheme http sftp: image: atmoz/sftp ports: @@ -20,8 +25,8 @@ services: - "10000:10000" command: azurite-blob --blobHost 0.0.0.0 configure-buckets: - image: minio/mc:RELEASE.2018-09-26T00-42-43Z + image: minio/mc:RELEASE.2020-02-20T23-49-54Z entrypoint: sh depends_on: - - filestorage - command: -c "sleep 5 && mc config host add minio http://filestorage:9000 AKIAIOSFODNN7EXAMPLE wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY && mc mb --region=eu-west-1 minio/meltwater-drone-default" + - minio + command: -c "sleep 5 && mc config host add minio http://minio:9000 AKIAIOSFODNN7EXAMPLE wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" From 9b721e49f64981d9c337c67aaf50c7ea40783f77 Mon Sep 17 00:00:00 2001 From: Kemal Akkoyun Date: Thu, 2 Apr 2020 19:21:09 +0200 Subject: [PATCH 08/16] Minor improvements --- .gitignore | 8 +++--- .goreleaser.yml | 2 ++ Makefile | 69 ++++++++++++++++++++++++++++++++++++------------- 3 files changed, 58 insertions(+), 21 deletions(-) diff --git a/.gitignore b/.gitignore index 22fd257e..117411e6 100644 --- a/.gitignore +++ b/.gitignore @@ -30,7 +30,9 @@ drone-cache vendor target bin -testcache -backup tmp -TODO.md +testdata + +# Azurite temp files +__*__ +__*__.json diff --git a/.goreleaser.yml b/.goreleaser.yml index 1bf9cd64..2ef56646 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -4,6 +4,7 @@ before: - make vendor dist: target/dist builds: + # TODO: -tags netgo - env: - CGO_ENABLED=0 goos: @@ -71,6 +72,7 @@ release: # NOTICE: To be able to run this stage in drone.io, we need Docker which runs in a container, # - and Docker needs a privileged container, so it's not possible for free tier right now. 
+# TODO: Alternative: https://github.com/goreleaser/goreleaser-action # TODO: Add missing GOOS and ARCH # dockers: # - diff --git a/Makefile b/Makefile index 09792573..22633b04 100644 --- a/Makefile +++ b/Makefile @@ -10,45 +10,54 @@ GOLANGCI_LINT_VERSION=v1.21.0 GOLANGCI_LINT_BIN=$(GOPATH)/bin/golangci-lint EMBEDMD_BIN=$(GOPATH)/bin/embedmd GOTEST_BIN=$(GOPATH)/bin/gotest +GORELEASER_VERSION=v0.120 +GORELEASER_BIN=$(GOPATH)/bin/goreleaser +LICHE_BIN=$(GOPATH)/bin/liche .PHONY: default all default: drone-cache all: drone-cache +.PHONY: setup +setup: + ./scripts/setup_dev_environment.sh + drone-cache: vendor main.go $(wildcard *.go) $(wildcard */*.go) - CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -mod=vendor -a -ldflags '-s -w -X main.version=$(VERSION)' -o $@ . + CGO_ENABLED=0 go build -mod=vendor -a -tags netgo -ldflags '-s -w -X main.version=$(VERSION)' -o $@ . .PHONY: build -build: vendor main.go $(wildcard *.go) $(wildcard */*.go) - go build -mod=vendor -a -ldflags '-s -w -X main.version=$(VERSION)' -o drone-cache . +build: main.go $(wildcard *.go) $(wildcard */*.go) + go build -mod=vendor -tags netgo -ldflags '-X main.version=$(VERSION)' -o drone-cache . .PHONY: release -release: build - goreleaser release --rm-dist +release: drone-cache $(GORELEASER_BIN) + ${GORELEASER_BIN} release --rm-dist .PHONY: snapshot -snapshot: - goreleaser release --skip-publish --rm-dist --snapshot +snapshot: drone-cache $(GORELEASER_BIN) + ${GORELEASER_BIN} release --skip-publish --rm-dist --snapshot .PHONY: clean clean: rm -f drone-cache rm -rf target -tmp/help.txt: clean build +tmp/help.txt: drone-cache mkdir -p tmp ./drone-cache --help &> tmp/help.txt README.md: tmp/help.txt - embedmd -w README.md + ${EMBEDMD_BIN} -w README.md -tmp/docs.txt: clean build - mkdir -p tmp - # ./drone-cache --help &> tmp/help.txt +tmp/docs.txt: drone-cache @echo "IMPLEMENT ME" DOCS.md: tmp/docs.txt - embedmd -w DOCS.md + ${EMBEDMD_BIN} -w DOCS.md + +docs: clean README.md DOCS.md ${LICHE_BIN} + @$(LICHE_BIN) --recursive docs --document-root . + @$(LICHE_BIN) --exclude "(goreportcard.com)" --document-root . *.md .PHONY: vendor vendor: @@ -57,6 +66,7 @@ vendor: .PHONY: compress compress: drone-cache + # Add as dependency @upx drone-cache .PHONY: container @@ -86,18 +96,34 @@ container-push-dev: container-dev .PHONY: test test: $(GOTEST_BIN) - docker-compose up -d - mkdir -p ./testcache/cache - gotest -race -short -cover ./... + docker-compose up -d && sleep 1 + -$(GOTEST_BIN) -failfast -race -short -tags=integration ./... + docker-compose down -v + +.PHONY: test-integration +test-integration: $(GOTEST_BIN) + docker-compose up -d && sleep 1 + -$(GOTEST_BIN) -race -cover -tags=integration -v ./... + docker-compose down -v + +.PHONY: test-unit +test-unit: $(GOTEST_BIN) + $(GOTEST_BIN) -race -cover -benchmem -v ./... 
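+# NOTE: test-unit runs only the untagged tests, so no docker-compose services
+# are needed; the -tags=integration targets above and below also compile the
+# tests that are (presumably) guarded by a `// +build integration` constraint
+# and expect minio, fakegcs, sftp and azurite to be reachable.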
+ +.PHONY: test-e2e +test-e2e: $(GOTEST_BIN) + docker-compose up -d && sleep 1 + -$(GOTEST_BIN) -race -cover -tags=integration -v ./internal/plugin + docker-compose down -v .PHONY: lint lint: $(GOLANGCI_LINT_BIN) # Check .golangci.yml for configuration - $(GOLANGCI_LINT_BIN) run -v --enable-all -c .golangci.yml + $(GOLANGCI_LINT_BIN) run -v --enable-all --skip-dirs tmp -c .golangci.yml .PHONY: fix fix: $(GOLANGCI_LINT_BIN) format - $(GOLANGCI_LINT_BIN) run --fix --enable-all -c .golangci.yml + $(GOLANGCI_LINT_BIN) run --fix --enable-all --skip-dirs tmp -c .golangci.yml .PHONY: format format: @@ -113,3 +139,10 @@ $(GOLANGCI_LINT_BIN): curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \ | sed -e '/install -d/d' \ | sh -s -- -b $(GOPATH)/bin $(GOLANGCI_LINT_VERSION) + +$(GORELEASER_BIN): + curl -sfL https://install.goreleaser.com/github.com/goreleaser/goreleaser.sh \ + | VERSION=${GORELEASER_VERSION} sh -s -- -b $(GOPATH)/bin $(GORELEASER_BIN) + +${LICHE_BIN}: + GO111MODULE=on go get -u github.com/raviqqe/liche From 2a4334d71a19c585c81899dfecc54bd0e6fb0aff Mon Sep 17 00:00:00 2001 From: Kemal Akkoyun Date: Thu, 2 Apr 2020 19:21:41 +0200 Subject: [PATCH 09/16] Update documentation --- CONTRIBUTING.md | 12 ++- DOCS.md | 12 +-- README.md | 203 ++++++++++++++++++++----------------- docs/examples/drone-1.0.md | 12 +-- 4 files changed, 132 insertions(+), 107 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index afa3e02f..1f9df3a7 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -15,11 +15,13 @@ the requirements below. ## Pull Request Process 0. Check out [Pull Request Checklist](#pull-request-checklist), ensure you have fulfilled each step. -1. Check out [Uber Style Guide](https://github.com/uber-go/guide/blob/master/style.md), project tries to follow it, ensure you have fulfilled it as much as possible. +1. Check out guidelines below, the project tries to follow these, ensure you have fulfilled them as much as possible. + * [Effective Go](https://golang.org/doc/effective_go.html) + * [Go Code Review Comments](https://github.com/golang/go/wiki/CodeReviewComments) 2. Ensure any install or build dependencies are removed before the end of the layer when doing a build. 3. Please ensure the [README](README.md) and [DOCS](./DOCS.md) are up-to-date with details of changes to the command-line interface, - this includes new environment variables, exposed ports, useful file locations and container parameters. + this includes new environment variables, exposed ports, used file locations, and container parameters. 4. **PLEASE ENSURE YOU DO NOT INTRODUCE BREAKING CHANGES.** 5. **PLEASE ENSURE BUG FIXES AND NEW FEATURES INCLUDE TESTS.** 6. You may merge the Pull Request in once you have the sign-off of one other maintainer/code owner, @@ -46,11 +48,11 @@ the requirements below. 0. **PLEASE DO NOT INTRODUCE BREAKING CHANGES** 1. Execute `make README.md`. This will update [usage](README.md#usage) section of [README.md](README.md) with latest CLI options 2. Increase the version numbers in any examples files and the README.md to the new version that this - release would represent. The versioning scheme we use is [SemVer](http://semver.org/) for versioning. For the versions available, see the [tags on this repository](https://github.com/meltwater/drone-cache/tags). + the release would represent. The versioning scheme we use is [SemVer](http://semver.org/) for versioning. 
For the versions available, see the [tags on this repository](https://github.com/meltwater/drone-cache/tags).
3. Ensure [CHANGELOG](CHANGELOG.md) is up-to-date with new version changes.
4. Update version references.
-5. Create a tag on master. Any changes on master will trigger a release with given tag and `latest tag.
+5. Create a tag on `master`. Any changes on `master` will trigger a release with the given tag and the `latest` tag.

    ```console
    $ git tag -am 'vX.X.X'
    $ git push --tags
    > ...
    ```
+6. Check whether all the generated artifacts are in place.
+7. Update the [plugin index](https://github.com/drone/drone-plugin-index/blob/master/content/meltwater/drone-cache/index.md) using [DOCS](./DOCS.md).

> **Keep in mind that users usually use the `latest` tagged images in their pipeline, please make sure you do not interfere with their working workflow.**

diff --git a/DOCS.md b/DOCS.md
index 54d1b8e6..bdd21e0d 100644
--- a/DOCS.md
+++ b/DOCS.md
@@ -115,7 +115,7 @@ steps:
     - 'vendor'

 - name: build
-  image: golang:1.13-alpine
+  image: golang:1.14-alpine
   pull: true
   commands:
     - apk add --update make git
@@ -160,7 +160,7 @@ steps:
     path: /tmp/cache

 - name: build
-  image: golang:1.13-alpine
+  image: golang:1.14-alpine
   pull: true
   commands:
     - apk add --update make git
@@ -188,7 +188,7 @@ volumes:

 **With custom cache key template**

-See [cache key templates](/meltwater/drone-cache#using-cache-key-templates) section for further information and to learn about syntax.
+See [cache key templates](#using-cache-key-templates) section for further information and to learn about syntax.

 ```yaml
 kind: pipeline
@@ -212,7 +212,7 @@ steps:
     - 'vendor'

 - name: build
-  image: golang:1.13-alpine
+  image: golang:1.14-alpine
   pull: true
   commands:
     - apk add --update make git
@@ -260,7 +260,7 @@ steps:
     - 'vendor'

 - name: build
-  image: golang:1.13-alpine
+  image: golang:1.14-alpine
   pull: true
   commands:
     - apk add --update make git
@@ -299,7 +299,7 @@ steps:
     debug: true

 - name: build
-  image: golang:1.13-alpine
+  image: golang:1.14-alpine
   pull: true
   commands:
     - apk add --update make git
diff --git a/README.md b/README.md
index f9ae1214..34c42e19 100644
--- a/README.md
+++ b/README.md
@@ -1,35 +1,54 @@
 # drone-cache

-[![semver](https://img.shields.io/badge/semver-1.0.4-blue.svg?cacheSeconds=2592000)](https://github.com/meltwater/drone-cache/releases) [![Maintenance](https://img.shields.io/maintenance/yes/2019.svg)](https://github.com/meltwater/drone-cache/commits/master) [![Drone](https://cloud.drone.io/api/badges/meltwater/drone-cache/status.svg)](https://cloud.drone.io/meltwater/drone-cache) [![Go Doc](https://godoc.org/github.com/meltwater/drone-cache?status.svg)](http://godoc.org/github.com/meltwater/drone-cache) [![Go Report Card](https://goreportcard.com/badge/github.com/meltwater/drone-cache)](https://goreportcard.com/report/github.com/meltwater/drone-cache) [![codebeat badge](https://codebeat.co/badges/802c6149-ac2d-4514-8648-f618c63a8d9e)](https://codebeat.co/projects/github-com-meltwater-drone-cache-master) [![](https://images.microbadger.com/badges/image/meltwater/drone-cache.svg)](https://microbadger.com/images/meltwater/drone-cache) [![](https://images.microbadger.com/badges/version/meltwater/drone-cache.svg)](https://microbadger.com/images/meltwater/drone-cache)
+[![semver](https://img.shields.io/badge/semver-1.0.4-blue.svg?cacheSeconds=2592000)](https://github.com/meltwater/drone-cache/releases)
[![Drone](https://cloud.drone.io/api/badges/meltwater/drone-cache/status.svg)](https://cloud.drone.io/meltwater/drone-cache) [![Maintenance](https://img.shields.io/maintenance/yes/2020.svg)](https://github.com/meltwater/drone-cache/commits/master) [![Go Doc](https://godoc.org/github.com/meltwater/drone-cache?status.svg)](http://godoc.org/github.com/meltwater/drone-cache) [![Go Report Card](https://goreportcard.com/badge/github.com/meltwater/drone-cache)](https://goreportcard.com/report/github.com/meltwater/drone-cache) [![codebeat badge](https://codebeat.co/badges/802c6149-ac2d-4514-8648-f618c63a8d9e)](https://codebeat.co/projects/github-com-meltwater-drone-cache-master) [![](https://images.microbadger.com/badges/image/meltwater/drone-cache.svg)](https://microbadger.com/images/meltwater/drone-cache) [![](https://images.microbadger.com/badges/version/meltwater/drone-cache.svg)](https://microbadger.com/images/meltwater/drone-cache)

A Drone plugin for caching current workspace files between builds to reduce your build times. `drone-cache` is a small CLI program, written in Go without any external OS dependencies (such as tar, etc).

-With `drone-cache`, you can provide your **own cache key templates**, specify **archive format** (tar, tar.gz, etc) and you can use **an S3 bucket, Azure Storage, Google Cloud Storage or a mounted volume** as storage for your cached files, even better you can implement **your own storage backend** to cover your use case.
+With `drone-cache`, you can provide your **own cache key templates**, specify the **archive format** (tar, tar.gz, etc) and you can use [**popular object storage**](#supported-storage-backends) as storage for your cached files; even better, you can implement **your custom storage backend** to cover your use case.

For detailed usage information and a list of available options please take a look at [usage](#usage) and [examples](#example-usage-of-drone-cache).

If you want to learn more about custom cache keys, see [cache key templates](docs/cache_key_templates.md).

If you want to learn more about the story behind `drone-cache`, you can read our blogpost [Making Drone Builds 10 Times Faster!](https://underthehood.meltwater.com/blog/2019/04/10/making-drone-builds-10-times-faster/)!

+## Supported Storage Backends
+
+* [AWS S3](https://aws.amazon.com/s3/)
+  * [Configuration](#)
+  * [Example](#)
+  * Other AWS API compatible stores:
+    * [Minio](https://min.io/)
+    * [Red Hat Ceph](https://www.redhat.com/en/technologies/storage/ceph)
+* [Azure Storage](https://azure.microsoft.com/en-us/services/storage/blobs/)
+  * [Configuration](#)
+  * [Example](#)
+* [Google Cloud Storage](https://cloud.google.com/storage/)
+  * [Configuration](#)
+  * [Example](#)
+* or any mounted local volume
+  * [Configuration](#)
+  * [Example](#)
+
## How does it work

`drone-cache` stores mounted directories and files under a key at the specified backend (by default S3).

-Use this plugin to cache data that makes your builds faster. In the case of a cache miss or zero cache restore it will fail silently in won't break your running pipeline.
+Use this plugin to cache data that makes your builds faster. In the case of a _cache miss_ or an _empty cache_ restore, it will fail silently and won't break your running pipeline.

The best example would be to use this with your package managers such as Mix, Bundler or Maven. After your initial download, you can build a cache and then you can restore that cache in your next build.

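To make the key step concrete, below is a minimal, self-contained sketch of how a template such as `{{ .Repo.Name }}_{{ .Commit.Branch }}_{{ .Build.Number }}` could be rendered and joined with the repository name into the remote path the archive is stored under. The `Metadata` struct here is a simplified stand-in for the plugin's real metadata type:

```go
package main

import (
	"bytes"
	"fmt"
	"path/filepath"
	"text/template"
)

// Metadata is a simplified stand-in for the plugin's build metadata,
// reduced to the fields the example template references.
type Metadata struct {
	Repo   struct{ Name string }
	Commit struct{ Branch string }
	Build  struct{ Number int }
}

func main() {
	var m Metadata
	m.Repo.Name = "drone-cache"
	m.Commit.Branch = "master"
	m.Build.Number = 42

	tmpl := template.Must(template.New("cacheKey").
		Parse("{{ .Repo.Name }}_{{ .Commit.Branch }}_{{ .Build.Number }}"))

	var key bytes.Buffer
	if err := tmpl.Execute(&key, m); err != nil {
		panic(err)
	}

	// The archive ends up under <repository name>/<rendered key>.
	fmt.Println(filepath.Join(m.Repo.Name, key.String()))
	// Output: drone-cache/drone-cache_master_42
}
```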
-With restored dependencies from a cache, commands like `mix deps.get` will only need to download new dependencies, rather than re-download every package on each and every build. +With restored dependencies from a cache, commands like `mix deps.get` will only need to download new dependencies, rather than re-download every package on each build. ## Example Usage of drone-cache -The following `.drone.yml` configuration show the most common use of drone-cache. +The following example configuration file (`.drone.yml`) shows the most common use of drone-cache. Note: These configs use drone 1.0 syntax. If you are using drone 0.8, check the examples in [docs/examples/drone-0.8.md](docs/examples/drone-0.8.md). +[//]: # (TODO: Move to a dedicated directory in docs, per backend!) ### Simple (Storing the cache in S3) ```yaml @@ -54,7 +73,7 @@ steps: - 'vendor' - name: build - image: golang:1.13-alpine + image: golang:1.14-alpine pull: true commands: - apk add --update make git @@ -96,77 +115,84 @@ USAGE: drone-cache [global options] command [command options] [arguments...] VERSION: - v1.0.4-18-g99c5e76-dirty + v1.0.4-76-gfa866ce COMMANDS: - help, h Shows a list of commands or help for one command + help, h Shows a list of commands or help for one command GLOBAL OPTIONS: - --log.level value, --ll value log filtering level. ('error', 'warn', 'info', 'debug') (default: "info") [$PLUGIN_LOG_LEVEL, $ LOG_LEVEL] - --log.format value, --lf value log format to use. ('logfmt', 'json') (default: "logfmt") [$PLUGIN_LOG_FORMAT, $ LOG_FORMAT] - --repo.fullname value, --rf value repository full name [$DRONE_REPO] - --repo.namespace value, --rns value repository namespace [$DRONE_REPO_NAMESPACE] - --repo.owner value, --ro value repository owner (for Drone version < 1.0) [$DRONE_REPO_OWNER] - --repo.name value, --rn value repository name [$DRONE_REPO_NAME] - --repo.link value, --rl value repository link [$DRONE_REPO_LINK] - --repo.avatar value, --ra value repository avatar [$DRONE_REPO_AVATAR] - --repo.branch value, --rb value repository default branch [$DRONE_REPO_BRANCH] - --repo.private, --rp repository is private [$DRONE_REPO_PRIVATE] - --repo.trusted, --rt repository is trusted [$DRONE_REPO_TRUSTED] - --remote.url value, --remu value git remote url [$DRONE_REMOTE_URL] - --commit.sha value, --cs value git commit sha [$DRONE_COMMIT_SHA] - --commit.ref value, --cr value git commit ref (default: "refs/heads/master") [$DRONE_COMMIT_REF] - --commit.branch value, --cb value git commit branch (default: "master") [$DRONE_COMMIT_BRANCH] - --commit.message value, --cm value git commit message [$DRONE_COMMIT_MESSAGE] - --commit.link value, --cl value git commit link [$DRONE_COMMIT_LINK] - --commit.author.name value, --an value git author name [$DRONE_COMMIT_AUTHOR] - --commit.author.email value, --ae value git author email [$DRONE_COMMIT_AUTHOR_EMAIL] - --commit.author.avatar value, --aa value git author avatar [$DRONE_COMMIT_AUTHOR_AVATAR] - --build.event value, --be value build event (default: "push") [$DRONE_BUILD_EVENT] - --build.number value, --bn value build number (default: 0) [$DRONE_BUILD_NUMBER] - --build.created value, --bc value build created (default: 0) [$DRONE_BUILD_CREATED] - --build.started value, --bs value build started (default: 0) [$DRONE_BUILD_STARTED] - --build.finished value, --bf value build finished (default: 0) [$DRONE_BUILD_FINISHED] - --build.status value, --bstat value build status (default: "success") [$DRONE_BUILD_STATUS] - --build.link value, --bl value build link [$DRONE_BUILD_LINK] - 
--build.deploy value, --db value build deployment target [$DRONE_DEPLOY_TO] - --yaml.verified, --yv build yaml is verified [$DRONE_YAML_VERIFIED] - --yaml.signed, --ys build yaml is signed [$DRONE_YAML_SIGNED] - --prev.build.number value, --pbn value previous build number (default: 0) [$DRONE_PREV_BUILD_NUMBER] - --prev.build.status value, --pbst value previous build status [$DRONE_PREV_BUILD_STATUS] - --prev.commit.sha value, --pcs value previous build sha [$DRONE_PREV_COMMIT_SHA] - --backend value, -b value cache backend to use in plugin (s3, filesystem) (default: "s3") [$PLUGIN_BACKEND] - --mount value, -m value cache directories, an array of folders to cache [$PLUGIN_MOUNT] - --rebuild, --reb rebuild the cache directories [$PLUGIN_REBUILD] - --restore, --res restore the cache directories [$PLUGIN_RESTORE] - --cache-key value, --chk value cache key to use for the cache directories [$PLUGIN_CACHE_KEY] - --archive-format value, --arcfmt value archive format to use to store the cache directories (tar, gzip) (default: "tar") [$PLUGIN_ARCHIVE_FORMAT] - --compression-level value, --cpl value compression level to use for gzip compression when archive-format specified as gzip - (check https://godoc.org/compress/flate#pkg-constants for available options) (default: -1) [$PLUGIN_COMPRESSION_LEVEL] - --skip-symlinks, --ss skip symbolic links in archive [$PLUGIN_SKIP_SYMLINKS, $ SKIP_SYMLINKS] - --debug, -d debug [$PLUGIN_DEBUG, $ DEBUG] - --filesystem-cache-root value, --fcr value local filesystem root directory for the filesystem cache (default: "/tmp/cache") [$PLUGIN_FILESYSTEM_CACHE_ROOT, $ FILESYSTEM_CACHE_ROOT] - --endpoint value, -e value endpoint for the s3/cloud storage connection [$PLUGIN_ENDPOINT, $S3_ENDPOINT, $CLOUD_STORAGE_ENDPOINT] - --access-key value, --akey value AWS access key [$PLUGIN_ACCESS_KEY, $AWS_ACCESS_KEY_ID, $CACHE_AWS_ACCESS_KEY_ID] - --secret-key value, --skey value AWS/GCP secret key [$PLUGIN_SECRET_KEY, $AWS_SECRET_ACCESS_KEY, $CACHE_AWS_SECRET_ACCESS_KEY, $GCP_API_KEY] - --bucket value, --bckt value AWS bucket name [$PLUGIN_BUCKET, $S3_BUCKET, $CLOUD_STORAGE_BUCKET] - --region value, --reg value AWS bucket region. (us-east-1, eu-west-1, ...) [$PLUGIN_REGION, $S3_REGION] - --path-style, --ps use path style for bucket paths. (true for minio, false for aws) [$PLUGIN_PATH_STYLE] - --acl value upload files with acl (private, public-read, ...) (default: "private") [$PLUGIN_ACL] - --encryption value, --enc value server-side encryption algorithm, defaults to none. (AES256, aws:kms) [$PLUGIN_ENCRYPTION] - --azure-account-name value Azure Blob Storage Account Name [$PLUGIN_ACCOUNT_NAME, $AZURE_ACCOUNT_NAME] - --azure-account-key value Azure Blob Storage Account Key [$PLUGIN_ACCOUNT_KEY, $AZURE_ACCOUNT_KEY] - --azure-container-name value Azure Blob Storage container name [$PLUGIN_CONTAINER, $AZURE_CONTAINER_NAME] - --azure-blob-storage-url value Azure Blob Storage URL (default: "blob.core.windows.net") [$AZURE_BLOB_STORAGE_URL] - --sftp-cache-root value sftp root directory [$SFTP_CACHE_ROOT] - --sftp-username value sftp username [$SFTP_USERNAME] - --sftp-password value sftp password [$SFTP_PASSWORD] - --ftp-public-key-file value sftp public key file path [$SFTP_PUBLIC_KEY_FILE] - --sftp-auth-method value sftp auth method, defaults to none. (PASSWORD, PUBLIC_KEY_FILE) [$SFTP_AUTH_METHOD] - --sftp-host value sftp host [$SFTP_HOST] - --sftp-port value sftp port [$SFTP_PORT] - --help, -h show help - --version, -v print the version + --log.level value log filtering level. 
('error', 'warn', 'info', 'debug') (default: "info") [$PLUGIN_LOG_LEVEL, $LOG_LEVEL] + --log.format value log format to use. ('logfmt', 'json') (default: "logfmt") [$PLUGIN_LOG_FORMAT, $LOG_FORMAT] + --repo.fullname value repository full name [$DRONE_REPO] + --repo.namespace value repository namespace [$DRONE_REPO_NAMESPACE] + --repo.owner value repository owner (for Drone version < 1.0) [$DRONE_REPO_OWNER] + --repo.name value repository name [$DRONE_REPO_NAME] + --repo.link value repository link [$DRONE_REPO_LINK] + --repo.avatar value repository avatar [$DRONE_REPO_AVATAR] + --repo.branch value repository default branch [$DRONE_REPO_BRANCH] + --repo.private repository is private (default: false) [$DRONE_REPO_PRIVATE] + --repo.trusted repository is trusted (default: false) [$DRONE_REPO_TRUSTED] + --remote.url value git remote url [$DRONE_REMOTE_URL] + --commit.sha value git commit sha [$DRONE_COMMIT_SHA] + --commit.ref value git commit ref (default: "refs/heads/master") [$DRONE_COMMIT_REF] + --commit.branch value git commit branch (default: "master") [$DRONE_COMMIT_BRANCH] + --commit.message value git commit message [$DRONE_COMMIT_MESSAGE] + --commit.link value git commit link [$DRONE_COMMIT_LINK] + --commit.author.name value git author name [$DRONE_COMMIT_AUTHOR] + --commit.author.email value git author email [$DRONE_COMMIT_AUTHOR_EMAIL] + --commit.author.avatar value git author avatar [$DRONE_COMMIT_AUTHOR_AVATAR] + --build.event value build event (default: "push") [$DRONE_BUILD_EVENT] + --build.number value build number (default: 0) [$DRONE_BUILD_NUMBER] + --build.created value build created (default: 0) [$DRONE_BUILD_CREATED] + --build.started value build started (default: 0) [$DRONE_BUILD_STARTED] + --build.finished value build finished (default: 0) [$DRONE_BUILD_FINISHED] + --build.status value build status (default: "success") [$DRONE_BUILD_STATUS] + --build.link value build link [$DRONE_BUILD_LINK] + --build.deploy value build deployment target [$DRONE_DEPLOY_TO] + --yaml.verified build yaml is verified (default: false) [$DRONE_YAML_VERIFIED] + --yaml.signed build yaml is signed (default: false) [$DRONE_YAML_SIGNED] + --prev.build.number value previous build number (default: 0) [$DRONE_PREV_BUILD_NUMBER] + --prev.build.status value previous build status [$DRONE_PREV_BUILD_STATUS] + --prev.commit.sha value previous build sha [$DRONE_PREV_COMMIT_SHA] + --backend value cache backend to use in plugin (s3, filesystem, sftp, azure, gcs) (default: "s3") [$PLUGIN_BACKEND] + --mount value cache directories, an array of folders to cache [$PLUGIN_MOUNT] + --rebuild rebuild the cache directories (default: false) [$PLUGIN_REBUILD] + --restore restore the cache directories (default: false) [$PLUGIN_RESTORE] + --cache-key value cache key to use for the cache directories [$PLUGIN_CACHE_KEY] + --archive-format value archive format to use to store the cache directories (tar, gzip) (default: "tar") [$PLUGIN_ARCHIVE_FORMAT] + --compression-level value compression level to use for gzip compression when archive-format specified as gzip + (check https://godoc.org/compress/flate#pkg-constants for available options) (default: -1) [$PLUGIN_COMPRESSION_LEVEL] + --skip-symlinks skip symbolic links in archive (default: false) [$PLUGIN_SKIP_SYMLINKS, $SKIP_SYMLINKS] + --debug debug (default: false) [$PLUGIN_DEBUG, DEBUG] + --backend.operation-timeout value timeout value to use for each storage operations (default: 3m0s) [$PLUGIN_BACKEND_OPERATION_TIMEOUT, $BACKEND_OPERATION_TIMEOUT] + --endpoint value endpoint 
for the s3/cloud storage connection [$PLUGIN_ENDPOINT, $S3_ENDPOINT, $GCS_ENDPOINT] + --bucket value AWS bucket name [$PLUGIN_BUCKET, $S3_BUCKET, $GCS_BUCKET] + --filesystem.cache-root value local filesystem root directory for the filesystem cache (default: "/tmp/cache") [$PLUGIN_FILESYSTEM_CACHE_ROOT, $FILESYSTEM_CACHE_ROOT] + --access-key value AWS access key [$PLUGIN_ACCESS_KEY, $AWS_ACCESS_KEY_ID, $CACHE_AWS_ACCESS_KEY_ID] + --secret-key value AWS secret key [$PLUGIN_SECRET_KEY, $AWS_SECRET_ACCESS_KEY, $CACHE_AWS_SECRET_ACCESS_KEY] + --region value AWS bucket region. (us-east-1, eu-west-1, ...) [$PLUGIN_REGION, $S3_REGION] + --path-style AWS path style to use for bucket paths. (true for minio, false for aws) (default: false) [$PLUGIN_PATH_STYLE, $AWS_PLUGIN_PATH_STYLE] + --acl value upload files with acl (private, public-read, ...) (default: "private") [$PLUGIN_ACL, $AWS_ACL] + --encryption value server-side encryption algorithm, defaults to none. (AES256, aws:kms) [$PLUGIN_ENCRYPTION, $AWS_ENCRYPTION] + --gcs.api-key value Google service account API key [$PLUGIN_API_KEY, $GCP_API_KEY] + --gcs.json-key value Google service account JSON key [$PLUGIN_JSON_KEY, $GCS_CACHE_JSON_KEY] + --gcs.acl value upload files with acl (private, public-read, ...) (default: "private") [$PLUGIN_GCS_ACL, $GCS_ACL] + --gcs.encryption-key value server-side encryption key, must be a 32-byte AES-256 key, defaults to none + (See https://cloud.google.com/storage/docs/encryption for details.) [$PLUGIN_GCS_ENCRYPTION_KEY, $GCS_ENCRYPTION_KEY] + --azure.account-name value Azure Blob Storage Account Name [$PLUGIN_ACCOUNT_NAME, $AZURE_ACCOUNT_NAME] + --azure.account-key value Azure Blob Storage Account Key [$PLUGIN_ACCOUNT_KEY, $AZURE_ACCOUNT_KEY] + --azure.blob-container-name value Azure Blob Storage container name [$PLUGIN_CONTAINER, $AZURE_CONTAINER_NAME] + --azure.blob-storage-url value Azure Blob Storage URL (default: "blob.core.windows.net") [$AZURE_BLOB_STORAGE_URL] + --azure.blob-max-retry-requets value Azure Blob Storage Max Retry Requests (default: 4) [$AZURE_BLOB_MAX_RETRY_REQUESTS] + --sftp.cache-root value sftp root directory [$SFTP_CACHE_ROOT] + --sftp.username value sftp username [$PLUGIN_USERNAME, $SFTP_USERNAME] + --sftp.password value sftp password [$PLUGIN_PASSWORD, $SFTP_PASSWORD] + --sftp.public-key-file value sftp public key file path [$PLUGIN_PUBLIC_KEY_FILE, $SFTP_PUBLIC_KEY_FILE] + --sftp.auth-method value sftp auth method, defaults to none. (PASSWORD, PUBLIC_KEY_FILE) [$SFTP_AUTH_METHOD] + --sftp.host value sftp host [$SFTP_HOST] + --sftp.port value sftp port [$SFTP_PORT] + --help, -h show help (default: false) + --version, -v print the version (default: false) ``` ### Using Docker (with Environment variables) @@ -191,7 +217,7 @@ $ docker run --rm \ ### Local set-up ```console -$ ./scripts/setup_dev_environment.sh +$ make setup ``` ### Tests @@ -200,19 +226,12 @@ $ ./scripts/setup_dev_environment.sh $ make test ``` -OR - -```console -$ docker-compose up -d -$ go test ./.. -``` - ### Build Binary Build the binary with the following commands: ```console -$ go build . +$ make build ``` ### Build Docker image @@ -225,7 +244,7 @@ $ make container ## Releases -Release management handled by CI pipeline. When you create a tag on `master` branch, CI handles the rest. +Release management handled by the CI pipeline. When you create a tag on `master` branch, CI handles the rest. 
You can find released artifacts (binaries, code, archives) under [releases](https://github.com/meltwater/drone-cache/releases).
@@ -233,25 +252,27 @@ You can find released images at [DockerHub](https://hub.docker.com/r/meltwater/d

**PLEASE DO NOT INTRODUCE BREAKING CHANGES**

-> Keep in mind that users usually use the image tagged with `latest` in their pipeline, please make sure you do not interfere with their working workflow.
+> Keep in mind that users usually use the image tagged with `latest` in their pipeline; please make sure you do not interfere with their working workflow. Latest stable releases will be tagged with `latest`.

## Versioning

-We use [SemVer](http://semver.org/) for versioning. For the versions available, see the [tags on this repository](https://github.com/meltwater/drone-cache/tags).
+`drone-cache` uses [SemVer](http://semver.org/) for versioning. For the versions available, see the [tags on this repository](https://github.com/meltwater/drone-cache/tags).
+
+As the versioning scheme dictates, `drone-cache` respects _backward compatibility_ within the major versions. However, the project only offers guarantees regarding the command-line interface (flags and environment variables). **Any exported public package can change its API.**

## Authors and Acknowledgement

+See the list of [all contributors](https://github.com/meltwater/drone-cache/graphs/contributors).
+
- [@dim](https://github.com/dim) - Thanks for [original work](https://github.com/bsm/drone-s3-cache)!
-- [@kakkoyun](https://github.com/kakkoyun)
-- [@salimane](https://github.com/salimane)
- [@AdamGlazerMW](https://github.com/AdamGlazerMW) - Special thanks to Adam for the amazing artwork!

-Also see the list of [all contributors](https://github.com/meltwater/drone-cache/graphs/contributors).
-
### Inspiration

- [github.com/bsm/drone-s3-cache](https://github.com/bsm/drone-s3-cache) (original work)
- [github.com/Drillster/drone-volume-cache](https://github.com/Drillster/drone-volume-cache)
+- [github.com/drone/drone-cache-lib](https://github.com/drone/drone-cache-lib)
+  > From version `v1.1.0` onward, `drone-cache` conforms to the interfaces from `github.com/drone/drone-cache-lib`, in anticipation of [a future cache plugin interface in the configuration](https://github.com/drone/drone/issues/2060).

## Contributing

@@ -259,9 +280,9 @@ Please read [CONTRIBUTING.md](CONTRIBUTING.md) to understand how to submit pull

## Future work

-We keep all ideas for new features and bug reports in [github.com/meltwater/drone-cache/issues](https://github.com/meltwater/drone-cache/issues).
+All ideas for new features and bug reports will be kept in [github.com/meltwater/drone-cache/issues](https://github.com/meltwater/drone-cache/issues).

-One bigger area of future investment is to build a couple of [new storage backends](https://github.com/meltwater/drone-cache/labels/storage-backend) for caching the workspace files.
+One bigger area of future investment is to add a couple of [new storage backends](https://github.com/meltwater/drone-cache/labels/storage-backend) for caching the workspace files.
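To give a sense of what implementing one involves, here is a minimal in-memory sketch; the two-method `Backend` interface shown here is an illustrative assumption, not the project's exact contract:

```go
package memory

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"sync"
)

// Backend is an assumed minimal shape for a storage backend, for
// illustration only.
type Backend interface {
	Get(path string) (io.ReadCloser, error)
	Put(path string, src io.ReadSeeker) error
}

// InMemory keeps archives in a map; useful only as a demonstration.
type InMemory struct {
	mu    sync.Mutex
	blobs map[string][]byte
}

// Compile-time check that InMemory satisfies the assumed interface.
var _ Backend = (*InMemory)(nil)

func New() *InMemory { return &InMemory{blobs: map[string][]byte{}} }

func (b *InMemory) Get(path string) (io.ReadCloser, error) {
	b.mu.Lock()
	defer b.mu.Unlock()

	blob, ok := b.blobs[path]
	if !ok {
		return nil, fmt.Errorf("get the object, <%s> not found", path)
	}

	return ioutil.NopCloser(bytes.NewReader(blob)), nil
}

func (b *InMemory) Put(path string, src io.ReadSeeker) error {
	blob, err := ioutil.ReadAll(src)
	if err != nil {
		return fmt.Errorf("put the object %w", err)
	}

	b.mu.Lock()
	defer b.mu.Unlock()
	b.blobs[path] = blob

	return nil
}
```

A real backend would add configuration, operation timeouts, and logging on top of this, as the existing implementations do.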
## License and Copyright diff --git a/docs/examples/drone-1.0.md b/docs/examples/drone-1.0.md index 63fbcdfb..1ee71a1a 100644 --- a/docs/examples/drone-1.0.md +++ b/docs/examples/drone-1.0.md @@ -27,7 +27,7 @@ steps: - 'vendor' - name: build - image: golang:1.13-alpine + image: golang:1.14-alpine pull: true commands: - apk add --update make git @@ -51,7 +51,7 @@ steps: ### Simple (Filesystem/Volume) -NOTE: This will only be effective if your pipeline runs on the same agent each time (for example, if you are running drone in single-machine mode). +NOTE: This will only be effective if your pipeline runs on the same agent each time (for example, if you are running the drone in single-machine mode). ```yaml kind: pipeline @@ -74,7 +74,7 @@ steps: path: /tmp/cache - name: build - image: golang:1.13-alpine + image: golang:1.14-alpine pull: true commands: - apk add --update make git @@ -127,7 +127,7 @@ steps: - 'vendor' - name: build - image: golang:1.13-alpine + image: golang:1.14-alpine pull: true commands: - apk add --update make git @@ -175,7 +175,7 @@ steps: - 'vendor' - name: build - image: golang:1.13-alpine + image: golang:1.14-alpine pull: true commands: - apk add --update make git @@ -214,7 +214,7 @@ steps: debug: true - name: build - image: golang:1.13-alpine + image: golang:1.14-alpine pull: true commands: - apk add --update make git From 50dde38ce41c6bbfd73f1116736f95bec3404e7c Mon Sep 17 00:00:00 2001 From: Kemal Akkoyun Date: Thu, 2 Apr 2020 19:25:35 +0200 Subject: [PATCH 10/16] Upgrade and pin go version. Update dependencies --- .drone.yml | 8 +-- DOCS.md | 10 +-- Dockerfile | 2 +- README.md | 2 +- docker/Dockerfile.linux.386 | 2 +- docker/Dockerfile.linux.amd64 | 2 +- docker/Dockerfile.linux.arm64 | 2 +- docker/Dockerfile.linux.arm_5 | 2 +- docker/Dockerfile.linux.arm_6 | 2 +- docker/Dockerfile.linux.arm_7 | 2 +- docs/examples/drone-1.0.md | 10 +-- go.mod | 23 +++--- go.sum | 130 ++++++++++++++++++---------------- 13 files changed, 100 insertions(+), 97 deletions(-) diff --git a/.drone.yml b/.drone.yml index ecf108b8..48c5f0ba 100644 --- a/.drone.yml +++ b/.drone.yml @@ -23,7 +23,7 @@ steps: - mc mb --region=eu-west-1 minio/drone-cache-bucket - name: build - image: golang:1.14-alpine + image: golang:1.14.1-alpine3.11 commands: - apk add --update make git - make drone-cache @@ -31,7 +31,7 @@ steps: CGO_ENABLED: 0 - name: lint - image: golang:1.14-alpine + image: golang:1.14.1-alpine3.11 commands: - apk add --update make git curl - make lint @@ -39,7 +39,7 @@ steps: CGO_ENABLED: 0 - name: test - image: golang:1.14-alpine + image: golang:1.14.1-alpine3.11 commands: - go test -mod=vendor -short -cover -tags=integration ./... 
environment: @@ -267,7 +267,7 @@ steps: AWS_SECRET_ACCESS_KEY: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY - name: build-after - image: golang:1.14-alpine + image: golang:1.14.1-alpine3.11 commands: - apk add --update make git - make drone-cache diff --git a/DOCS.md b/DOCS.md index bdd21e0d..91462899 100644 --- a/DOCS.md +++ b/DOCS.md @@ -115,7 +115,7 @@ steps: - 'vendor' - name: build - image: golang:1.14-alpine + image: golang:1.14.1-alpine3.11 pull: true commands: - apk add --update make git @@ -160,7 +160,7 @@ steps: path: /tmp/cache - name: build - image: golang:1.14-alpine + image: golang:1.14.1-alpine3.11 pull: true commands: - apk add --update make git @@ -212,7 +212,7 @@ steps: - 'vendor' - name: build - image: golang:1.14-alpine + image: golang:1.14.1-alpine3.11 pull: true commands: - apk add --update make git @@ -260,7 +260,7 @@ steps: - 'vendor' - name: build - image: golang:1.14-alpine + image: golang:1.14.1-alpine3.11 pull: true commands: - apk add --update make git @@ -299,7 +299,7 @@ steps: debug: true - name: build - image: golang:1.14-alpine + image: golang:1.14.1-alpine3.11 pull: true commands: - apk add --update make git diff --git a/Dockerfile b/Dockerfile index 9664ed70..b5236e0b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.13-alpine AS builder +FROM golang:1.14.1-alpine3.11 AS builder RUN apk add --update --no-cache ca-certificates tzdata && update-ca-certificates RUN echo "[WARNING] Make sure you have run 'goreleaser release', before 'docker build'!" diff --git a/README.md b/README.md index 34c42e19..21871990 100644 --- a/README.md +++ b/README.md @@ -73,7 +73,7 @@ steps: - 'vendor' - name: build - image: golang:1.14-alpine + image: golang:1.14.1-alpine3.11 pull: true commands: - apk add --update make git diff --git a/docker/Dockerfile.linux.386 b/docker/Dockerfile.linux.386 index 29271c32..a591a2d3 100644 --- a/docker/Dockerfile.linux.386 +++ b/docker/Dockerfile.linux.386 @@ -1,4 +1,4 @@ -FROM golang:1.13-alpine AS builder +FROM golang:1.14.1-alpine3.11 AS builder RUN apk add --update --no-cache ca-certificates tzdata && update-ca-certificates RUN echo "[WARNING] Make sure you have run 'goreleaser release', before 'docker build'!" diff --git a/docker/Dockerfile.linux.amd64 b/docker/Dockerfile.linux.amd64 index 9664ed70..b5236e0b 100644 --- a/docker/Dockerfile.linux.amd64 +++ b/docker/Dockerfile.linux.amd64 @@ -1,4 +1,4 @@ -FROM golang:1.13-alpine AS builder +FROM golang:1.14.1-alpine3.11 AS builder RUN apk add --update --no-cache ca-certificates tzdata && update-ca-certificates RUN echo "[WARNING] Make sure you have run 'goreleaser release', before 'docker build'!" diff --git a/docker/Dockerfile.linux.arm64 b/docker/Dockerfile.linux.arm64 index 4f38a52b..2570eb5e 100644 --- a/docker/Dockerfile.linux.arm64 +++ b/docker/Dockerfile.linux.arm64 @@ -1,4 +1,4 @@ -FROM golang:1.13-alpine AS builder +FROM golang:1.14.1-alpine3.11 AS builder RUN apk add --update --no-cache ca-certificates tzdata && update-ca-certificates RUN echo "[WARNING] Make sure you have run 'goreleaser release', before 'docker build'!" diff --git a/docker/Dockerfile.linux.arm_5 b/docker/Dockerfile.linux.arm_5 index 1d6a0ba0..105ad97e 100644 --- a/docker/Dockerfile.linux.arm_5 +++ b/docker/Dockerfile.linux.arm_5 @@ -1,4 +1,4 @@ -FROM golang:1.13-alpine AS builder +FROM golang:1.14.1-alpine3.11 AS builder RUN apk add --update --no-cache ca-certificates tzdata && update-ca-certificates RUN echo "[WARNING] Make sure you have run 'goreleaser release', before 'docker build'!" 
diff --git a/docker/Dockerfile.linux.arm_6 b/docker/Dockerfile.linux.arm_6 index 6d0bdf3c..1d551c0f 100644 --- a/docker/Dockerfile.linux.arm_6 +++ b/docker/Dockerfile.linux.arm_6 @@ -1,4 +1,4 @@ -FROM golang:1.13-alpine AS builder +FROM golang:1.14.1-alpine3.11 AS builder RUN apk add --update --no-cache ca-certificates tzdata && update-ca-certificates RUN echo "[WARNING] Make sure you have run 'goreleaser release', before 'docker build'!" diff --git a/docker/Dockerfile.linux.arm_7 b/docker/Dockerfile.linux.arm_7 index d2647f41..8edd6e40 100644 --- a/docker/Dockerfile.linux.arm_7 +++ b/docker/Dockerfile.linux.arm_7 @@ -1,4 +1,4 @@ -FROM golang:1.13-alpine AS builder +FROM golang:1.14.1-alpine3.11 AS builder RUN apk add --update --no-cache ca-certificates tzdata && update-ca-certificates RUN echo "[WARNING] Make sure you have run 'goreleaser release', before 'docker build'!" diff --git a/docs/examples/drone-1.0.md b/docs/examples/drone-1.0.md index 1ee71a1a..95dde7b0 100644 --- a/docs/examples/drone-1.0.md +++ b/docs/examples/drone-1.0.md @@ -27,7 +27,7 @@ steps: - 'vendor' - name: build - image: golang:1.14-alpine + image: golang:1.14.1-alpine3.11 pull: true commands: - apk add --update make git @@ -74,7 +74,7 @@ steps: path: /tmp/cache - name: build - image: golang:1.14-alpine + image: golang:1.14.1-alpine3.11 pull: true commands: - apk add --update make git @@ -127,7 +127,7 @@ steps: - 'vendor' - name: build - image: golang:1.14-alpine + image: golang:1.14.1-alpine3.11 pull: true commands: - apk add --update make git @@ -175,7 +175,7 @@ steps: - 'vendor' - name: build - image: golang:1.14-alpine + image: golang:1.14.1-alpine3.11 pull: true commands: - apk add --update make git @@ -214,7 +214,7 @@ steps: debug: true - name: build - image: golang:1.14-alpine + image: golang:1.14.1-alpine3.11 pull: true commands: - apk add --update make git diff --git a/go.mod b/go.mod index 9821ec11..2ca569c8 100644 --- a/go.mod +++ b/go.mod @@ -6,22 +6,21 @@ require ( github.com/Azure/go-autorest/autorest/adal v0.8.1 // indirect github.com/aws/aws-sdk-go v1.16.35 github.com/davecgh/go-spew v1.1.1 // indirect - github.com/go-ini/ini v1.41.0 // indirect + github.com/dustin/go-humanize v1.0.0 github.com/go-kit/kit v0.9.0 github.com/go-logfmt/logfmt v0.4.0 // indirect github.com/go-stack/stack v1.8.0 // indirect - github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e // indirect - github.com/jtolds/gls v4.2.1+incompatible // indirect - github.com/minio/minio-go v6.0.14+incompatible - github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/google/go-cmp v0.4.0 github.com/pkg/sftp v1.10.1 - github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304 // indirect - github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c // indirect - github.com/urfave/cli v1.20.0 - golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 - golang.org/x/sys v0.0.0-20191008105621-543471e840be // indirect + github.com/urfave/cli/v2 v2.1.1 + golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073 + golang.org/x/net v0.0.0-20200301022130-244492dfa37a // indirect + golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 + golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e // indirect + golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 // indirect google.golang.org/api v0.9.0 - gopkg.in/ini.v1 v1.41.0 // indirect + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect + gopkg.in/yaml.v2 v2.2.5 // indirect ) -go 1.13 +go 1.14 diff --git a/go.sum b/go.sum index 2b135073..02489a23 100644 
--- a/go.sum +++ b/go.sum @@ -1,3 +1,19 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3 h1:AVXDdKsrtX33oR9fbCMu/+c1o8Ofjq6Ku/MInaLVg5Y= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go/bigquery v1.0.1 h1:hL+ycaJpVE9M7nLoiXb/Pn10ENE2u+oddxbD8uu0ZVU= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/datastore v1.0.0 h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/pubsub v1.0.1 h1:W9tAK3E57P75u0XLLR82LZyw8VpAnhmyTOxW9qzmyj8= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/storage v1.1.0 h1:KYV0dnmEcOuxTd8YLiuQqfx8PzSwDeuDvYGoa5+DbDI= +cloud.google.com/go/storage v1.1.0/go.mod h1:a81gKs1KmeOyF/qrbeu4APVXICPLcsl0Ilx2XvD7ZYU= github.com/Azure/azure-pipeline-go v0.2.1 h1:OLBdZJ3yvOn2MezlWvbrBMTEUQC72zAftRZOMdj5HYo= github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= github.com/Azure/azure-storage-blob-go v0.8.0 h1:53qhf0Oxa0nOjgbDeeYPUeyiNmafAFEY95rZLK0Tj6o= @@ -18,37 +34,23 @@ github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1Gn github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= -github.com/aws/aws-sdk-go v1.16.35 h1:qz1h7uxswkVaE6kJPoPWwt3F76HlCLrg/UyDJq3cavc= -github.com/aws/aws-sdk-go v1.16.35/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3 h1:AVXDdKsrtX33oR9fbCMu/+c1o8Ofjq6Ku/MInaLVg5Y= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go/bigquery v1.0.1 h1:hL+ycaJpVE9M7nLoiXb/Pn10ENE2u+oddxbD8uu0ZVU= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/datastore v1.0.0 h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/pubsub v1.0.1 h1:W9tAK3E57P75u0XLLR82LZyw8VpAnhmyTOxW9qzmyj8= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= 
-cloud.google.com/go/storage v1.1.0 h1:KYV0dnmEcOuxTd8YLiuQqfx8PzSwDeuDvYGoa5+DbDI= -cloud.google.com/go/storage v1.1.0/go.mod h1:a81gKs1KmeOyF/qrbeu4APVXICPLcsl0Ilx2XvD7ZYU= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/aws/aws-sdk-go v1.16.35 h1:qz1h7uxswkVaE6kJPoPWwt3F76HlCLrg/UyDJq3cavc= +github.com/aws/aws-sdk-go v1.16.35 h1:qz1h7uxswkVaE6kJPoPWwt3F76HlCLrg/UyDJq3cavc= +github.com/aws/aws-sdk-go v1.16.35/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.16.35/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/go-ini/ini v1.41.0 h1:526aoxDtxRHFQKMZfcX2OG9oOI8TJ5yPLM0Mkno/uTY= -github.com/go-ini/ini v1.41.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= @@ -69,6 +71,8 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -77,8 +81,6 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e h1:JKmoR8x90Iww1ks85zJ1lfDGgIiMDuIptTOhJq+zKyg= -github.com/gopherjs/gopherjs 
v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -86,27 +88,21 @@ github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5i github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024 h1:rBMNdlhTLzJjJSDIjNEXX1Pz3Hmwmz91v+zycvx9PJc= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE= -github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149 h1:HfxbT6/JcvIljmERptWhwa8XzP7H3T+Z2N26gTsaDaA= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/minio/minio-go v6.0.14+incompatible h1:fnV+GD28LeqdN6vT2XdGKW8Qe/IfjJDswNVuni6km9o= -github.com/minio/minio-go v6.0.14+incompatible/go.mod h1:7guKYtitv8dktvNUGrhzmNlA5wrAABTQXCoesZdFQO8= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/sftp v1.10.1 h1:VasscCm72135zRysgrJDKsntdmPN+OuU3+nnHYA9wyc= @@ -114,44 +110,29 @@ github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304 h1:Jpy1PXuP99tXNrhbq2BaPz9B+jNAvH1JPQQpG/9GCXY= -github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c h1:Ho+uVpkel/udgjbwB5Lktg9BtvJSh2DT0Hi6LPSyI2w= -github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= +github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be h1:QAcqgptGM8IQBC9K/RC4o+O9YmqEm0diQn9QmZw/0mU= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/ini.v1 v1.41.0 h1:Ka3ViY6gNYSKiVy71zXBEqKplnV35ImDLVG+8uoIklE= -gopkg.in/ini.v1 v1.41.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 
v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +github.com/urfave/cli/v2 v2.1.1 h1:Qt8FeAtxE/vfdrLmR3rxR6JRE0RoVmbXu8+6kZtYU4k= +github.com/urfave/cli/v2 v2.1.1/go.mod h1:SE9GqnLQmjVa0iPEY0f1w3ygNIYcIJ0OKPMoW2caLfQ= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073 h1:xMPOj6Pz6UipU1wXLkrtqpHbR0AVFnyPEQq/wRWz9lM= +golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -177,12 +158,16 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190213061140-3a22650c66bd h1:HuTn7WObtcDo9uEEU7rEqL0jYthdXAmZ6PP+meazmaU= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= 
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= @@ -193,17 +178,26 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -225,6 +219,8 @@ golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20190917162342-3b4f30a44f3b h1:5PDpbTpVmeVPIQOoxshLbs4ATaIDQrZN5z3nTUtm2+8= golang.org/x/tools v0.0.0-20190917162342-3b4f30a44f3b/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -248,10 +244,18 @@ google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZi google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/ini.v1 v1.41.0 h1:Ka3ViY6gNYSKiVy71zXBEqKplnV35ImDLVG+8uoIklE= -gopkg.in/ini.v1 v1.41.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 0ef9e9010dc0eddd36cd2037d904b761215e9916 Mon Sep 17 00:00:00 2001 From: Kemal Akkoyun Date: Sun, 5 Apr 2020 11:13:31 +0200 Subject: [PATCH 11/16] Handle nested dirs --- archive/archive.go | 8 ++--- archive/gzip/gzip.go | 9 ++--- archive/gzip/gzip_test.go | 65 ++++++++++++++++++++++++++------- archive/option.go | 12 +++---- archive/tar/tar.go | 66 ++++++++++++++++------------------ archive/tar/tar_test.go | 65 ++++++++++++++++++++++++++------- internal/plugin/plugin.go | 7 +++- internal/plugin/plugin_test.go | 40 +++++++++++++++++---- 8 files changed, 192 insertions(+), 80 deletions(-) diff --git a/archive/archive.go b/archive/archive.go index 800e02af..67f976f8 100644 --- a/archive/archive.go +++ b/archive/archive.go @@ -31,7 +31,7 @@ type Archive interface { } // FromFormat determines which archive to use from given archive format. 
-func FromFormat(logger log.Logger, format string, opts ...Option) Archive { +func FromFormat(logger log.Logger, root string, format string, opts ...Option) Archive { options := options{ compressionLevel: DefaultCompressionLevel, } @@ -42,11 +42,11 @@ func FromFormat(logger log.Logger, format string, opts ...Option) Archive { switch format { case Gzip: - return gzip.New(logger, options.skipSymlinks, options.compressionLevel) + return gzip.New(logger, root, options.skipSymlinks, options.compressionLevel) case Tar: - return tar.New(logger, options.skipSymlinks) + return tar.New(logger, root, options.skipSymlinks) default: level.Error(logger).Log("msg", "unknown archive format", "format", format) - return tar.New(logger, options.skipSymlinks) // DefaultArchiveFormat + return tar.New(logger, root, options.skipSymlinks) // DefaultArchiveFormat } } diff --git a/archive/gzip/gzip.go b/archive/gzip/gzip.go index ffc7f2da..2f4fd251 100644 --- a/archive/gzip/gzip.go +++ b/archive/gzip/gzip.go @@ -15,13 +15,14 @@ import ( type Archive struct { logger log.Logger + root string compressionLevel int skipSymlinks bool } // New creates an archive that uses the .tar.gz file format. -func New(logger log.Logger, skipSymlinks bool, compressionLevel int) *Archive { - return &Archive{logger, compressionLevel, skipSymlinks} +func New(logger log.Logger, root string, skipSymlinks bool, compressionLevel int) *Archive { + return &Archive{logger, root, compressionLevel, skipSymlinks} } // Create writes content of the given source to an archive, returns written bytes. @@ -33,7 +34,7 @@ func (a *Archive) Create(srcs []string, w io.Writer) (int64, error) { defer internal.CloseWithErrLogf(a.logger, gw, "gzip writer") - return tar.New(a.logger, a.skipSymlinks).Create(srcs, gw) + return tar.New(a.logger, a.root, a.skipSymlinks).Create(srcs, gw) } // Extract reads content from the given archive reader and restores it to the destination, returns written bytes. 
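A minimal usage sketch of the updated constructor; the extra `root` argument anchors entry names inside the archive (the logger, paths, and compression level below are illustrative):

```go
// Assumes this repository's archive/gzip package, go-kit's log
// package, and compress/flate for the level constant.
logger := log.NewNopLogger()

// Files under /drone/src are stored relative to that root.
a := gzip.New(logger, "/drone/src", false, flate.DefaultCompression)

f, err := os.Create("/tmp/cache.tar.gz")
if err != nil {
	// handle error
}
defer f.Close()

// Create returns the number of bytes written to the archive.
written, err := a.Create([]string{"/drone/src/vendor"}, f)
```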
@@ -45,5 +46,5 @@ func (a *Archive) Extract(dst string, r io.Reader) (int64, error) { defer internal.CloseWithErrLogf(a.logger, gr, "gzip reader") - return tar.New(a.logger, a.skipSymlinks).Extract(dst, gr) + return tar.New(a.logger, a.root, a.skipSymlinks).Extract(dst, gr) } diff --git a/archive/gzip/gzip_test.go b/archive/gzip/gzip_test.go index 4a93b2fa..91e81ed3 100644 --- a/archive/gzip/gzip_test.go +++ b/archive/gzip/gzip_test.go @@ -35,14 +35,14 @@ func TestCreate(t *testing.T) { }{ { name: "empty mount paths", - tgz: New(log.NewNopLogger(), true, flate.DefaultCompression), + tgz: New(log.NewNopLogger(), testRootMounted, true, flate.DefaultCompression), srcs: []string{}, written: 0, err: nil, }, { name: "non-existing mount paths", - tgz: New(log.NewNopLogger(), true, flate.DefaultCompression), + tgz: New(log.NewNopLogger(), testRootMounted, true, flate.DefaultCompression), srcs: []string{ "iamnotexists", "metoo", @@ -52,14 +52,21 @@ func TestCreate(t *testing.T) { }, { name: "existing mount paths", - tgz: New(log.NewNopLogger(), true, flate.DefaultCompression), + tgz: New(log.NewNopLogger(), testRootMounted, true, flate.DefaultCompression), srcs: exampleFileTree(t, "gzip_create"), written: 43, // 3 x tmpfile in dir, 1 tmpfile err: nil, }, + { + name: "existing mount nested paths", + tgz: New(log.NewNopLogger(), testRootMounted, true, flate.DefaultCompression), + srcs: exampleNestedFileTree(t, "tar_create"), + written: 56, // 4 x tmpfile in dir, 1 tmpfile + err: nil, + }, { name: "existing mount paths with symbolic links", - tgz: New(log.NewNopLogger(), false, flate.DefaultCompression), + tgz: New(log.NewNopLogger(), testRootMounted, false, flate.DefaultCompression), srcs: exampleFileTreeWithSymlinks(t, "gzip_create_symlink"), written: 43, err: nil, @@ -100,17 +107,21 @@ func TestExtract(t *testing.T) { t.Cleanup(func() { os.RemoveAll(testRoot) }) // Setup - tgz := New(log.NewNopLogger(), false, flate.DefaultCompression) + tgz := New(log.NewNopLogger(), testRootMounted, false, flate.DefaultCompression) arcDir, arcDirClean := test.CreateTempDir(t, "gzip_extract_archive") t.Cleanup(arcDirClean) files := exampleFileTree(t, "gzip_extract") - archivePath := filepath.Join(arcDir, "test.tar.gz") _, err := create(tgz, files, archivePath) test.Ok(t, err) + nestedFiles := exampleNestedFileTree(t, "gzip_extract_nested") + nestedArchivePath := filepath.Join(arcDir, "nested_test.tar.gz") + _, err = create(tgz, nestedFiles, nestedArchivePath) + test.Ok(t, err) + filesWithSymlink := exampleFileTreeWithSymlinks(t, "gzip_extract_symlink") archiveWithSymlinkPath := filepath.Join(arcDir, "test_with_symlink.tar.gz") _, err = create(tgz, filesWithSymlink, archiveWithSymlinkPath) @@ -133,7 +144,7 @@ func TestExtract(t *testing.T) { }{ { name: "non-existing archive", - tgz: New(log.NewNopLogger(), true, flate.DefaultCompression), + tgz: New(log.NewNopLogger(), testRootMounted, true, flate.DefaultCompression), archivePath: "iamnotexists", srcs: []string{}, written: 0, @@ -141,7 +152,7 @@ func TestExtract(t *testing.T) { }, { name: "non-existing root destination", - tgz: New(log.NewNopLogger(), true, flate.DefaultCompression), + tgz: New(log.NewNopLogger(), testRootMounted, true, flate.DefaultCompression), archivePath: emptyArchivePath, srcs: []string{}, written: 0, @@ -149,7 +160,7 @@ func TestExtract(t *testing.T) { }, { name: "empty archive", - tgz: New(log.NewNopLogger(), true, flate.DefaultCompression), + tgz: New(log.NewNopLogger(), testRootMounted, true, flate.DefaultCompression), archivePath: 
emptyArchivePath, srcs: []string{}, written: 0, @@ -157,7 +168,7 @@ func TestExtract(t *testing.T) { }, { name: "bad archives", - tgz: New(log.NewNopLogger(), true, flate.DefaultCompression), + tgz: New(log.NewNopLogger(), testRootMounted, true, flate.DefaultCompression), archivePath: badArchivePath, srcs: []string{}, written: 0, @@ -165,15 +176,23 @@ func TestExtract(t *testing.T) { }, { name: "existing archive", - tgz: New(log.NewNopLogger(), true, flate.DefaultCompression), + tgz: New(log.NewNopLogger(), testRootMounted, true, flate.DefaultCompression), archivePath: archivePath, srcs: files, written: 43, err: nil, }, + { + name: "existing archive with nested files", + tgz: New(log.NewNopLogger(), testRootMounted, true, flate.DefaultCompression), + archivePath: nestedArchivePath, + srcs: nestedFiles, + written: 56, + err: nil, + }, { name: "existing archive with symbolic links", - tgz: New(log.NewNopLogger(), false, flate.DefaultCompression), + tgz: New(log.NewNopLogger(), testRootMounted, false, flate.DefaultCompression), archivePath: archiveWithSymlinkPath, srcs: filesWithSymlink, written: 43, @@ -263,6 +282,28 @@ func exampleFileTree(t *testing.T, name string) []string { return []string{file, dir} } +func exampleNestedFileTree(t *testing.T, name string) []string { + dir, cleanup := test.CreateTempDir(t, name, testRootMounted) + t.Cleanup(cleanup) + + nestedFile, nestedFileClean := test.CreateTempFile(t, name, []byte("hello\ndrone!\n"), dir) // 13 bytes + t.Cleanup(nestedFileClean) + + nestedDir, nestedDirClean := test.CreateTempFilesInDir(t, name, []byte("hello\ngo!\n"), dir) // 10 bytes + t.Cleanup(nestedDirClean) + + nestedDir1, nestedDirClean1 := test.CreateTempDir(t, name, dir) + t.Cleanup(nestedDirClean1) + + nestedDir2, nestedDirClean2 := test.CreateTempDir(t, name, nestedDir1) + t.Cleanup(nestedDirClean2) + + nestedFile1, nestedFileClean1 := test.CreateTempFile(t, name, []byte("hello\ndrone!\n"), nestedDir2) // 13 bytes + t.Cleanup(nestedFileClean1) + + return []string{nestedDir, nestedFile, nestedFile1} +} + func exampleFileTreeWithSymlinks(t *testing.T, name string) []string { file, fileClean := test.CreateTempFile(t, name, []byte("hello\ndrone!\n"), testRootMounted) // 13 bytes t.Cleanup(fileClean) diff --git a/archive/option.go b/archive/option.go index b794b697..64cb93ef 100644 --- a/archive/option.go +++ b/archive/option.go @@ -16,16 +16,16 @@ func (f optionFunc) apply(o *options) { f(o) } -// WithSkipSymlinks sets skip symlink option. -func WithSkipSymlinks(b bool) Option { +// WithCompressionLevel sets compression level option. +func WithCompressionLevel(i int) Option { return optionFunc(func(o *options) { - o.skipSymlinks = b + o.compressionLevel = i }) } -// WithCompressionLevel sets compression level option. -func WithCompressionLevel(i int) Option { +// WithSkipSymlinks sets skip symlink option. +func WithSkipSymlinks(b bool) Option { return optionFunc(func(o *options) { - o.compressionLevel = i + o.skipSymlinks = b }) } diff --git a/archive/tar/tar.go b/archive/tar/tar.go index 383defda..304f17ba 100644 --- a/archive/tar/tar.go +++ b/archive/tar/tar.go @@ -13,6 +13,8 @@ import ( "github.com/meltwater/drone-cache/internal" ) +const defaultDirPermission = 0755 + var ( // ErrSourceNotReachable TODO ErrSourceNotReachable = errors.New("source not reachable") @@ -24,12 +26,13 @@ var ( type Archive struct { logger log.Logger + root string skipSymlinks bool } // New creates an archive that uses the .tar file format. 
-func New(logger log.Logger, skipSymlinks bool) *Archive { - return &Archive{logger, skipSymlinks} +func New(logger log.Logger, root string, skipSymlinks bool) *Archive { + return &Archive{logger, root, skipSymlinks} } // Create writes content of the given source to an archive, returns written bytes. @@ -40,19 +43,13 @@ func (a *Archive) Create(srcs []string, w io.Writer) (int64, error) { var written int64 for _, src := range srcs { - info, err := os.Lstat(src) + _, err := os.Lstat(src) if err != nil { return written, fmt.Errorf("make sure file or directory readable <%s>: %v, %w", src, err, ErrSourceNotReachable) } - if info.IsDir() { - if err := filepath.Walk(src, writeToArchive(tw, src, a.skipSymlinks, &written)); err != nil { - return written, fmt.Errorf("walk, add all files to archive %w", err) - } - } else { - if err := writeToArchive(tw, src, a.skipSymlinks, &written)(src, info, nil); err != nil { - return written, fmt.Errorf("add file to archive %w", err) - } + if err := filepath.Walk(src, writeToArchive(tw, a.root, a.skipSymlinks, &written)); err != nil { + return written, fmt.Errorf("walk, add all files to archive %w", err) } } @@ -87,9 +84,9 @@ func writeToArchive(tw *tar.Writer, root string, skipSymlinks bool, written *int } } - name, err := relativeName(root, path) + name, err := relative(root, path) if err != nil { - return fmt.Errorf("relative name %w", err) + return fmt.Errorf("relative name <%s>: <%s> %w", path, root, err) } h.Name = name @@ -116,24 +113,18 @@ func writeToArchive(tw *tar.Writer, root string, skipSymlinks bool, written *int } } -func relativeName(src, path string) (string, error) { - info, err := os.Lstat(src) - if err != nil { - return "", fmt.Errorf("%s: stat %w", src, err) - } - +func relative(parent string, path string) (string, error) { name := filepath.Base(path) - if info.IsDir() { - dir, err := filepath.Rel(filepath.Dir(src), filepath.Dir(path)) - if err != nil { - return "", fmt.Errorf("relative path %q: %q %v", path, dir, err) - } - - name = filepath.Join(filepath.ToSlash(dir), name) + rel, err := filepath.Rel(parent, filepath.Dir(path)) + if err != nil { + return "", fmt.Errorf("relative path <%s>, base <%s> %w", rel, name, err) } - return strings.TrimPrefix(filepath.ToSlash(name), "/"), nil + // NOTICE: filepath.Rel puts "../" when given path is not under parent. + rel = strings.TrimLeft(rel, "../") + rel = filepath.ToSlash(rel) + return strings.TrimPrefix(filepath.Join(rel, name), "/"), nil } func createSymlinkHeader(fi os.FileInfo, path string) (*tar.Header, error) { @@ -186,14 +177,19 @@ func (a *Archive) Extract(dst string, r io.Reader) (int64, error) { } var target string - // NOTICE: It's been done like this to be compatible with normal behavior of a tar extract. 
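// Worked example for the relative helper introduced above (paths
// illustrative):
//
//   relative("/drone/src", "/drone/src/vendor/pkg/a.go") -> "vendor/pkg/a.go"
//   relative("/drone/src", "/drone/src/main.go")         -> "main.go"
//
// For a path outside the root, filepath.Rel produces a "../" prefix,
// which the TrimLeft call strips, so extracted entries cannot escape
// the destination root.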
- switch { - case filepath.Base(dst) == filepath.Dir(h.Name): - target = filepath.Join(filepath.Dir(dst), h.Name) - case filepath.Base(dst) == filepath.Base(h.Name): - target = filepath.Join(filepath.Dir(dst), h.Name) - default: - target = filepath.Join(dst, h.Name) + if dst == h.Name { + target = h.Name + } else { + name, err := relative(dst, h.Name) + if err != nil { + return 0, fmt.Errorf("relative name %w", err) + } + + target = filepath.Join(dst, name) + } + + if err := os.MkdirAll(filepath.Dir(target), defaultDirPermission); err != nil { + return 0, fmt.Errorf("ensure directory <%s> %w", target, err) } switch h.Typeflag { diff --git a/archive/tar/tar_test.go b/archive/tar/tar_test.go index 0a1a3fc5..d2f1b144 100644 --- a/archive/tar/tar_test.go +++ b/archive/tar/tar_test.go @@ -32,14 +32,14 @@ func TestCreate(t *testing.T) { }{ { name: "empty mount paths", - ta: New(log.NewNopLogger(), true), + ta: New(log.NewNopLogger(), testRootMounted, true), srcs: []string{}, written: 0, err: nil, }, { name: "non-existing mount paths", - ta: New(log.NewNopLogger(), true), + ta: New(log.NewNopLogger(), testRootMounted, true), srcs: []string{ "idonotexist", "metoo", @@ -49,14 +49,21 @@ func TestCreate(t *testing.T) { }, { name: "existing mount paths", - ta: New(log.NewNopLogger(), true), + ta: New(log.NewNopLogger(), testRootMounted, true), srcs: exampleFileTree(t, "tar_create"), written: 43, // 3 x tmpfile in dir, 1 tmpfile err: nil, }, + { + name: "existing mount nested paths", + ta: New(log.NewNopLogger(), testRootMounted, true), + srcs: exampleNestedFileTree(t, "tar_create"), + written: 56, // 4 x tmpfile in dir, 1 tmpfile + err: nil, + }, { name: "existing mount paths with symbolic links", - ta: New(log.NewNopLogger(), false), + ta: New(log.NewNopLogger(), testRootMounted, false), srcs: exampleFileTreeWithSymlinks(t, "tar_create_symlink"), written: 43, err: nil, @@ -98,17 +105,21 @@ func TestExtract(t *testing.T) { t.Cleanup(func() { os.RemoveAll(testRoot) }) // Setup - ta := New(log.NewNopLogger(), false) + ta := New(log.NewNopLogger(), testRootMounted, false) arcDir, arcDirClean := test.CreateTempDir(t, "tar_extract_archives", testRootMounted) t.Cleanup(arcDirClean) files := exampleFileTree(t, "tar_extract") - archivePath := filepath.Join(arcDir, "test.tar") _, err := create(ta, files, archivePath) test.Ok(t, err) + nestedFiles := exampleNestedFileTree(t, "tar_extract_nested") + nestedArchivePath := filepath.Join(arcDir, "nested_test.tar") + _, err = create(ta, nestedFiles, nestedArchivePath) + test.Ok(t, err) + filesWithSymlink := exampleFileTreeWithSymlinks(t, "tar_extract_symlink") archiveWithSymlinkPath := filepath.Join(arcDir, "test_with_symlink.tar") _, err = create(ta, filesWithSymlink, archiveWithSymlinkPath) @@ -131,7 +142,7 @@ func TestExtract(t *testing.T) { }{ { name: "non-existing archive", - ta: ta, + ta: New(log.NewNopLogger(), testRootMounted, false), archivePath: "idonotexist", srcs: []string{}, written: 0, @@ -139,7 +150,7 @@ func TestExtract(t *testing.T) { }, { name: "non-existing root destination", - ta: ta, + ta: New(log.NewNopLogger(), testRootMounted, false), archivePath: emptyArchivePath, srcs: []string{}, written: 0, @@ -147,7 +158,7 @@ func TestExtract(t *testing.T) { }, { name: "empty archive", - ta: ta, + ta: New(log.NewNopLogger(), testRootMounted, false), archivePath: emptyArchivePath, srcs: []string{}, written: 0, @@ -155,7 +166,7 @@ func TestExtract(t *testing.T) { }, { name: "bad archives", - ta: ta, + ta: New(log.NewNopLogger(), testRootMounted, false), 
archivePath: badArchivePath, srcs: []string{}, written: 0, @@ -163,15 +174,23 @@ func TestExtract(t *testing.T) { }, { name: "existing archive", - ta: ta, + ta: New(log.NewNopLogger(), testRootMounted, false), archivePath: archivePath, srcs: files, written: 43, err: nil, }, + { + name: "existing archive with nested files", + ta: New(log.NewNopLogger(), testRootMounted, false), + archivePath: nestedArchivePath, + srcs: nestedFiles, + written: 56, + err: nil, + }, { name: "existing archive with symbolic links", - ta: New(log.NewNopLogger(), false), + ta: New(log.NewNopLogger(), testRootMounted, false), archivePath: archiveWithSymlinkPath, srcs: filesWithSymlink, written: 43, @@ -264,6 +283,28 @@ func exampleFileTree(t *testing.T, name string) []string { return []string{file, dir} } +func exampleNestedFileTree(t *testing.T, name string) []string { + dir, cleanup := test.CreateTempDir(t, name, testRootMounted) + t.Cleanup(cleanup) + + nestedFile, nestedFileClean := test.CreateTempFile(t, name, []byte("hello\ndrone!\n"), dir) // 13 bytes + t.Cleanup(nestedFileClean) + + nestedDir, nestedDirClean := test.CreateTempFilesInDir(t, name, []byte("hello\ngo!\n"), dir) // 10 bytes + t.Cleanup(nestedDirClean) + + nestedDir1, nestedDirClean1 := test.CreateTempDir(t, name, dir) + t.Cleanup(nestedDirClean1) + + nestedDir2, nestedDirClean2 := test.CreateTempDir(t, name, nestedDir1) + t.Cleanup(nestedDirClean2) + + nestedFile1, nestedFileClean1 := test.CreateTempFile(t, name, []byte("hello\ndrone!\n"), nestedDir2) // 13 bytes + t.Cleanup(nestedFileClean1) + + return []string{nestedDir, nestedFile, nestedFile1} +} + func exampleFileTreeWithSymlinks(t *testing.T, name string) []string { file, fileClean := test.CreateTempFile(t, name, []byte("hello\ndrone!\n"), testRootMounted) // 13 bytes t.Cleanup(fileClean) diff --git a/internal/plugin/plugin.go b/internal/plugin/plugin.go index a3b438e0..6580a016 100644 --- a/internal/plugin/plugin.go +++ b/internal/plugin/plugin.go @@ -62,6 +62,11 @@ func (p *Plugin) Exec() error { return errors.New("rebuild and restore are mutually exclusive, please set only one of them") } + workspace, err := os.Getwd() + if err != nil { + return fmt.Errorf("get working directory %w", err) + } + var options []cache.Option options = append(options, cache.WithNamespace(p.Metadata.Repo.Name)) @@ -94,7 +99,7 @@ func (p *Plugin) Exec() error { // 3. Initialize cache. c := cache.New(p.logger, storage.New(p.logger, b, cfg.StorageOperationTimeout), - archive.FromFormat(p.logger, cfg.ArchiveFormat, + archive.FromFormat(p.logger, workspace, cfg.ArchiveFormat, archive.WithSkipSymlinks(cfg.SkipSymlinks), archive.WithCompressionLevel(cfg.CompressionLevel), ), diff --git a/internal/plugin/plugin_test.go b/internal/plugin/plugin_test.go index 73da6e2c..c7aade46 100644 --- a/internal/plugin/plugin_test.go +++ b/internal/plugin/plugin_test.go @@ -97,6 +97,13 @@ func TestPlugin(t *testing.T) { }, success: true, }, + { + name: "existing mount with nested files", + mount: func(name string) []string { + return exampleNestedFileTree(t, name, make([]byte, 1*1024)) + }, + success: true, + }, { name: "existing mount with cache key", mount: func(name string) []string { @@ -124,9 +131,9 @@ func TestPlugin(t *testing.T) { for i, tc := range cases { i, tc := i, tc // NOTICE: https://github.com/golang/go/wiki/CommonMistakes#using-goroutines-on-loop-iterator-variables. 
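// Why this hunk renames the loop variable: `i, tc := i, tc` re-binds the
// loop variables per iteration (see the NOTICE link above), and using `f`
// instead of `fmt` stops the variable from shadowing the standard library
// fmt package inside the subtest closures.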
- for _, fmt := range formats { + for _, f := range formats { for b, setup := range backends { - name := strings.Join([]string{strconv.Itoa(i), tc.name, b, fmt}, "-") + name := strings.Join([]string{strconv.Itoa(i), tc.name, b, f}, "-") t.Run(name, func(t *testing.T) { // Setup c := defaultConfig() @@ -134,7 +141,7 @@ func TestPlugin(t *testing.T) { paths := tc.mount(tc.name) mount(c, paths...) cacheKey(c, tc.cacheKey) - format(c, fmt) + format(c, f) // Rebuild run { @@ -148,13 +155,13 @@ func TestPlugin(t *testing.T) { } // Move source to compare later - dir, cleanup := test.CreateTempDir(t, sanitize(name), testRootMoved) + restoreRoot, cleanup := test.CreateTempDir(t, sanitize(name), testRootMoved) t.Cleanup(cleanup) for _, p := range paths { rel, err := filepath.Rel(testRootMounted, p) test.Ok(t, err) - dst := filepath.Join(dir, rel) + dst := filepath.Join(restoreRoot, rel) test.Ok(t, os.MkdirAll(filepath.Dir(dst), 0755)) test.Ok(t, os.Rename(p, dst)) } @@ -172,7 +179,7 @@ func TestPlugin(t *testing.T) { } // Compare - test.EqualDirs(t, dir, testRootMounted, paths) + test.EqualDirs(t, restoreRoot, testRootMounted, paths) }) } } @@ -261,6 +268,27 @@ func exampleFileTree(t *testing.T, name string, content []byte) []string { return []string{file, dir} } +func exampleNestedFileTree(t *testing.T, name string, content []byte) []string { + name = sanitize(name) + + dir, cleanup := test.CreateTempDir(t, name, testRootMounted) + t.Cleanup(cleanup) + + nestedFile, nestedFileClean := test.CreateTempFile(t, name, content, dir) + t.Cleanup(nestedFileClean) + + nestedDir, nestedDirClean := test.CreateTempFilesInDir(t, name, content, dir) + t.Cleanup(nestedDirClean) + + nestedDir1, nestedDirClean1 := test.CreateTempDir(t, name, dir) + t.Cleanup(nestedDirClean1) + + nestedFile1, nestedFileClean1 := test.CreateTempFile(t, name, content, nestedDir1) + t.Cleanup(nestedFileClean1) + + return []string{nestedDir, nestedFile, nestedFile1} +} + func exampleFileTreeWithSymlinks(t *testing.T, name string, content []byte) []string { name = sanitize(name) From a1c066276b63e512144f7607db68da50daf01e6e Mon Sep 17 00:00:00 2001 From: Kemal Akkoyun Date: Sun, 5 Apr 2020 11:24:38 +0200 Subject: [PATCH 12/16] Makes local and remote roots configurable --- .golangci.yml | 2 +- cache/rebuilder.go | 7 ++++--- cache/restorer.go | 7 ++++--- internal/plugin/config.go | 2 ++ internal/plugin/plugin.go | 22 +++++++++++++++++----- main.go | 13 ++++++++++++- 6 files changed, 40 insertions(+), 13 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 457dc1e0..e2c73f0c 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -18,6 +18,6 @@ linters-settings: lll: line-length: 120 funlen: - lines: 80 + lines: 85 statements: 45 diff --git a/cache/rebuilder.go b/cache/rebuilder.go index 2e8d03f9..bce9f4b2 100644 --- a/cache/rebuilder.go +++ b/cache/rebuilder.go @@ -46,8 +46,9 @@ func (r rebuilder) Rebuild(srcs []string) error { } var ( - wg sync.WaitGroup - errs = &internal.MultiError{} + wg sync.WaitGroup + errs = &internal.MultiError{} + namespace = filepath.ToSlash(filepath.Clean(r.namespace)) ) for _, src := range srcs { @@ -55,7 +56,7 @@ func (r rebuilder) Rebuild(srcs []string) error { return fmt.Errorf("source <%s>, make sure file or directory exists and readable %w", src, err) } - dst := filepath.Join(r.namespace, key, src) + dst := filepath.Join(namespace, key, src) level.Info(r.logger).Log("msg", "rebuilding cache for directory", "local", src, "remote", dst) diff --git a/cache/restorer.go b/cache/restorer.go index 
60324493..e3bb3add 100644 --- a/cache/restorer.go +++ b/cache/restorer.go @@ -43,12 +43,13 @@ func (r restorer) Restore(dsts []string) error { } var ( - wg sync.WaitGroup - errs = &internal.MultiError{} + wg sync.WaitGroup + errs = &internal.MultiError{} + namespace = filepath.ToSlash(filepath.Clean(r.namespace)) ) for _, dst := range dsts { - src := filepath.Join(r.namespace, key, dst) + src := filepath.Join(namespace, key, dst) level.Info(r.logger).Log("msg", "restoring directory", "local", dst, "remote", src) diff --git a/internal/plugin/config.go b/internal/plugin/config.go index 297b0999..eb6bfcd0 100644 --- a/internal/plugin/config.go +++ b/internal/plugin/config.go @@ -15,6 +15,8 @@ type Config struct { ArchiveFormat string Backend string CacheKeyTemplate string + RemoteRoot string + LocalRoot string // Modes Debug bool diff --git a/internal/plugin/plugin.go b/internal/plugin/plugin.go index 6580a016..33c702f5 100644 --- a/internal/plugin/plugin.go +++ b/internal/plugin/plugin.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "os" + "path/filepath" "github.com/meltwater/drone-cache/archive" "github.com/meltwater/drone-cache/cache" @@ -62,13 +63,24 @@ func (p *Plugin) Exec() error { return errors.New("rebuild and restore are mutually exclusive, please set only one of them") } - workspace, err := os.Getwd() - if err != nil { - return fmt.Errorf("get working directory %w", err) + var localRoot string + if p.Config.LocalRoot != "" { + localRoot = filepath.Clean(p.Config.LocalRoot) + } else { + workspace, err := os.Getwd() + if err != nil { + return fmt.Errorf("get working directory %w", err) + } + + localRoot = workspace } var options []cache.Option - options = append(options, cache.WithNamespace(p.Metadata.Repo.Name)) + if p.Config.RemoteRoot != "" { + options = append(options, cache.WithNamespace(p.Config.RemoteRoot)) + } else { + options = append(options, cache.WithNamespace(p.Metadata.Repo.Name)) + } var generator key.Generator if cfg.CacheKeyTemplate != "" { @@ -99,7 +111,7 @@ func (p *Plugin) Exec() error { // 3. Initialize cache. 
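// How the two new roots flow in (summary): LocalRoot (PLUGIN_LOCAL_ROOT,
// defaulting to the working directory) becomes the archive root passed to
// archive.FromFormat below, while RemoteRoot (PLUGIN_REMOTE_ROOT,
// defaulting to the repository name) becomes the cache namespace, that is,
// the key prefix used in the storage backend.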
c := cache.New(p.logger, storage.New(p.logger, b, cfg.StorageOperationTimeout), - archive.FromFormat(p.logger, workspace, cfg.ArchiveFormat, + archive.FromFormat(p.logger, localRoot, cfg.ArchiveFormat, archive.WithSkipSymlinks(cfg.SkipSymlinks), archive.WithCompressionLevel(cfg.CompressionLevel), ), diff --git a/main.go b/main.go index ad4f7c0a..8b9d3f98 100644 --- a/main.go +++ b/main.go @@ -241,12 +241,21 @@ func main() { Usage: "restore the cache directories", EnvVars: []string{"PLUGIN_RESTORE"}, }, - // RESTORE &cli.StringFlag{ Name: "cache-key, chk", Usage: "cache key to use for the cache directories", EnvVars: []string{"PLUGIN_CACHE_KEY"}, }, + &cli.StringFlag{ + Name: "remote-root, rr", + Usage: "remote root directory to contain all the cache files created (default repo.name)", + EnvVars: []string{"PLUGIN_REMOTE_ROOT"}, + }, + &cli.StringFlag{ + Name: "local-root, lr", + Usage: "local root directory to base given mount paths (default pwd [present working directory])", + EnvVars: []string{"PLUGIN_LOCAL_ROOT"}, + }, // CACHE-KEYS // REBUILD-KEYS // RESTORE-KEYS @@ -498,6 +507,8 @@ func run(c *cli.Context) error { Mount: c.StringSlice("mount"), Rebuild: c.Bool("rebuild"), Restore: c.Bool("restore"), + RemoteRoot: c.String("remote-root"), + LocalRoot: c.String("local-root"), StorageOperationTimeout: c.Duration("backend.operation-timeout"), FileSystem: filesystem.Config{ From 067296f24d261f7b6bccec43f2437b79cf035b36 Mon Sep 17 00:00:00 2001 From: Kemal Akkoyun Date: Sun, 5 Apr 2020 17:35:40 +0200 Subject: [PATCH 13/16] Improve error formating --- archive/gzip/gzip.go | 2 +- archive/tar/tar.go | 48 ++++++++++++------------ cache/flusher.go | 4 +- cache/rebuilder.go | 14 +++---- cache/restorer.go | 10 ++--- internal/io.go | 4 +- internal/plugin/plugin.go | 6 +-- internal/plugin/plugin_test.go | 2 +- key/generator/hash.go | 2 +- key/generator/metadata.go | 4 +- key/generator/util.go | 2 +- storage/backend/azure/azure.go | 10 ++--- storage/backend/backend.go | 2 +- storage/backend/filesystem/filesystem.go | 8 ++-- storage/backend/gcs/gcs.go | 12 +++--- storage/backend/s3/s3.go | 6 +-- storage/backend/sftp/sftp.go | 39 ++++++++++--------- 17 files changed, 87 insertions(+), 88 deletions(-) diff --git a/archive/gzip/gzip.go b/archive/gzip/gzip.go index 2f4fd251..2fc2628e 100644 --- a/archive/gzip/gzip.go +++ b/archive/gzip/gzip.go @@ -29,7 +29,7 @@ func New(logger log.Logger, root string, skipSymlinks bool, compressionLevel int func (a *Archive) Create(srcs []string, w io.Writer) (int64, error) { gw, err := gzip.NewWriterLevel(w, a.compressionLevel) if err != nil { - return 0, fmt.Errorf("create archive writer %w", err) + return 0, fmt.Errorf("create archive writer, %w", err) } defer internal.CloseWithErrLogf(a.logger, gw, "gzip writer") diff --git a/archive/tar/tar.go b/archive/tar/tar.go index 304f17ba..b612fdfd 100644 --- a/archive/tar/tar.go +++ b/archive/tar/tar.go @@ -45,11 +45,11 @@ func (a *Archive) Create(srcs []string, w io.Writer) (int64, error) { for _, src := range srcs { _, err := os.Lstat(src) if err != nil { - return written, fmt.Errorf("make sure file or directory readable <%s>: %v, %w", src, err, ErrSourceNotReachable) + return written, fmt.Errorf("make sure file or directory readable <%s>: %v,, %w", src, err, ErrSourceNotReachable) } if err := filepath.Walk(src, writeToArchive(tw, a.root, a.skipSymlinks, &written)); err != nil { - return written, fmt.Errorf("walk, add all files to archive %w", err) + return written, fmt.Errorf("walk, add all files to archive, %w", err) } } 
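A note on the ", %w" convention this patch applies throughout: fmt.Errorf with the %w verb (Go 1.13+) wraps the underlying error so that callers can still match it with errors.Is or errors.As. A minimal, runnable sketch of the pattern — the file name and message below are illustrative only, not taken from this series:

	package main

	import (
		"errors"
		"fmt"
		"os"
	)

	func main() {
		_, err := os.Open("does-not-exist.txt")

		// Wrap with context, mirroring the "<context>, %w" style adopted above.
		wrapped := fmt.Errorf("open cache file, %w", err)

		// The original sentinel error survives the wrap chain.
		fmt.Println(errors.Is(wrapped, os.ErrNotExist)) // prints: true
	}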
@@ -70,7 +70,7 @@ func writeToArchive(tw *tar.Writer, root string, skipSymlinks bool, written *int
 	// Create header for Regular files and Directories
 	h, err := tar.FileInfoHeader(fi, fi.Name())
 	if err != nil {
-		return fmt.Errorf("create header for <%s> %w", path, err)
+		return fmt.Errorf("create header for <%s>, %w", path, err)
 	}
 
 	if fi.Mode()&os.ModeSymlink != 0 { // isSymbolic
@@ -80,19 +80,19 @@ func writeToArchive(tw *tar.Writer, root string, skipSymlinks bool, written *int
 		var err error
 		if h, err = createSymlinkHeader(fi, path); err != nil {
-			return fmt.Errorf("create header for symbolic link %w", err)
+			return fmt.Errorf("create header for symbolic link, %w", err)
 		}
 	}
 
 	name, err := relative(root, path)
 	if err != nil {
-		return fmt.Errorf("relative name <%s>: <%s> %w", path, root, err)
+		return fmt.Errorf("relative name <%s>: <%s>, %w", path, root, err)
 	}
 
 	h.Name = name
 
 	if err := tw.WriteHeader(h); err != nil {
-		return fmt.Errorf("write header for <%s> %w", path, err)
+		return fmt.Errorf("write header for <%s>, %w", path, err)
 	}
 
 	if !fi.Mode().IsRegular() {
@@ -101,7 +101,7 @@ func writeToArchive(tw *tar.Writer, root string, skipSymlinks bool, written *int
 	n, err := writeFileToArchive(tw, path)
 	if err != nil {
-		return fmt.Errorf("write file to archive %w", err)
+		return fmt.Errorf("write file to archive, %w", err)
 	}
 
 	*written += n
@@ -118,7 +118,7 @@ func relative(parent string, path string) (string, error) {
 	rel, err := filepath.Rel(parent, filepath.Dir(path))
 	if err != nil {
-		return "", fmt.Errorf("relative path <%s>, base <%s> %w", rel, name, err)
+		return "", fmt.Errorf("relative path <%s>, base <%s>, %w", rel, name, err)
 	}
 
 	// NOTICE: filepath.Rel puts "../" when given path is not under parent.
@@ -130,12 +130,12 @@ func relative(parent string, path string) (string, error) {
 func createSymlinkHeader(fi os.FileInfo, path string) (*tar.Header, error) {
 	lnk, err := os.Readlink(path)
 	if err != nil {
-		return nil, fmt.Errorf("read link <%s> %w", path, err)
+		return nil, fmt.Errorf("read link <%s>, %w", path, err)
 	}
 
 	h, err := tar.FileInfoHeader(fi, lnk)
 	if err != nil {
-		return nil, fmt.Errorf("create symlink header for <%s> %w", path, err)
+		return nil, fmt.Errorf("create symlink header for <%s>, %w", path, err)
 	}
 
 	return h, nil
@@ -144,14 +144,14 @@ func createSymlinkHeader(fi os.FileInfo, path string) (*tar.Header, error) {
 func writeFileToArchive(tw io.Writer, path string) (n int64, err error) {
 	f, err := os.Open(path)
 	if err != nil {
-		return 0, fmt.Errorf("open file <%s> %w", path, err)
+		return 0, fmt.Errorf("open file <%s>, %w", path, err)
 	}
 
 	defer internal.CloseWithErrCapturef(&err, f, "write file to archive <%s>", path)
 
 	written, err := io.Copy(tw, f)
 	if err != nil {
-		return written, fmt.Errorf("copy the file <%s> data to the tarball %w", path, err)
+		return written, fmt.Errorf("copy the file <%s> data to the tarball, %w", path, err)
 	}
 
 	return written, nil
@@ -182,14 +182,14 @@ func (a *Archive) Extract(dst string, r io.Reader) (int64, error) {
 		} else {
 			name, err := relative(dst, h.Name)
 			if err != nil {
-				return 0, fmt.Errorf("relative name %w", err)
+				return 0, fmt.Errorf("relative name, %w", err)
 			}
 
 			target = filepath.Join(dst, name)
 		}
 
 		if err := os.MkdirAll(filepath.Dir(target), defaultDirPermission); err != nil {
-			return 0, fmt.Errorf("ensure directory <%s> %w", target, err)
+			return 0, fmt.Errorf("ensure directory <%s>, %w", target, err)
 		}
 
 		switch h.Typeflag {
@@ -204,19 +204,19 @@ func (a *Archive) Extract(dst string, r io.Reader) (int64, error) {
 			written += n
 
 			if err != nil {
-				return written, fmt.Errorf("extract regular file %w", err)
+				return written, fmt.Errorf("extract regular file, %w", err)
 			}
 
 			continue
 		case tar.TypeSymlink:
 			if err := extractSymlink(h, target); err != nil {
-				return written, fmt.Errorf("extract symbolic link %w", err)
+				return written, fmt.Errorf("extract symbolic link, %w", err)
 			}
 
 			continue
 		case tar.TypeLink:
 			if err := extractLink(h, target); err != nil {
-				return written, fmt.Errorf("extract link %w", err)
+				return written, fmt.Errorf("extract link, %w", err)
 			}
 
 			continue
@@ -230,7 +230,7 @@ func (a *Archive) Extract(dst string, r io.Reader) (int64, error) {
 func extractDir(h *tar.Header, target string) error {
 	if err := os.MkdirAll(target, os.FileMode(h.Mode)); err != nil {
-		return fmt.Errorf("create directory <%s> %w", target, err)
+		return fmt.Errorf("create directory <%s>, %w", target, err)
 	}
 
 	return nil
@@ -239,14 +239,14 @@ func extractDir(h *tar.Header, target string) error {
 func extractRegular(h *tar.Header, tr io.Reader, target string) (n int64, err error) {
 	f, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, os.FileMode(h.Mode))
 	if err != nil {
-		return 0, fmt.Errorf("open extracted file for writing <%s> %w", target, err)
+		return 0, fmt.Errorf("open extracted file for writing <%s>, %w", target, err)
 	}
 
 	defer internal.CloseWithErrCapturef(&err, f, "extract regular <%s>", target)
 
 	written, err := io.Copy(f, tr)
 	if err != nil {
-		return written, fmt.Errorf("copy extracted file for writing <%s> %w", target, err)
+		return written, fmt.Errorf("copy extracted file for writing <%s>, %w", target, err)
 	}
 
 	return written, nil
@@ -254,11 +254,11 @@ func extractRegular(h *tar.Header, tr io.Reader, target string) (n int64, err er
 func extractSymlink(h *tar.Header, target string) error {
 	if err := unlink(target); err != nil {
-		return fmt.Errorf("unlink <%s> %w", target, err)
+		return fmt.Errorf("unlink <%s>, %w", target, err)
 	}
 
 	if err := os.Symlink(h.Linkname, target); err != nil {
-		return fmt.Errorf("create symbolic link <%s> %w", target, err)
+		return fmt.Errorf("create symbolic link <%s>, %w", target, err)
 	}
 
 	return nil
@@ -266,11 +266,11 @@ func extractSymlink(h *tar.Header, target string) error {
 func extractLink(h *tar.Header, target string) error {
 	if err := unlink(target); err != nil {
-		return fmt.Errorf("unlink <%s> %w", target, err)
+		return fmt.Errorf("unlink <%s>, %w", target, err)
 	}
 
 	if err := os.Link(h.Linkname, target); err != nil {
-		return fmt.Errorf("create hard link <%s> %w", h.Linkname, err)
+		return fmt.Errorf("create hard link <%s>, %w", h.Linkname, err)
 	}
 
 	return nil
diff --git a/cache/flusher.go b/cache/flusher.go
index 79875984..458c2e0c 100644
--- a/cache/flusher.go
+++ b/cache/flusher.go
@@ -30,14 +30,14 @@ func (f flusher) Flush(srcs []string) error {
 
 		files, err := f.store.List(src)
 		if err != nil {
-			return fmt.Errorf("flusher list %w", err)
+			return fmt.Errorf("flusher list, %w", err)
 		}
 
 		for _, file := range files {
 			if f.dirty(file) {
 				err := f.store.Delete(file.Path)
 				if err != nil {
-					return fmt.Errorf("flusher delete %w", err)
+					return fmt.Errorf("flusher delete, %w", err)
 				}
 			}
 		}
diff --git a/cache/rebuilder.go b/cache/rebuilder.go
index bce9f4b2..f531d96e 100644
--- a/cache/rebuilder.go
+++ b/cache/rebuilder.go
@@ -42,7 +42,7 @@ func (r rebuilder) Rebuild(srcs []string) error {
 
 	key, err := r.generateKey()
 	if err != nil {
-		return fmt.Errorf("generate key %w", err)
+		return fmt.Errorf("generate key, %w", err)
 	}
 
 	var (
@@ -53,7 +53,7 @@ func (r rebuilder) Rebuild(srcs []string) error {
 
 	for _, src := range srcs {
 		if _, err := os.Lstat(src); err != nil {
-			return fmt.Errorf("source <%s>, make sure file or directory exists and readable %w", src, err)
+			return fmt.Errorf("source <%s>, make sure file or directory exists and readable, %w", src, err)
 		}
 
 		dst := filepath.Join(namespace, key, src)
@@ -66,7 +66,7 @@ func (r rebuilder) Rebuild(srcs []string) error {
 			defer wg.Done()
 
 			if err := r.rebuild(src, dst); err != nil {
-				errs.Add(fmt.Errorf("upload from <%s> to <%s> %w", src, dst, err))
+				errs.Add(fmt.Errorf("upload from <%s> to <%s>, %w", src, dst, err))
 			}
 		}(dst, src)
 	}
@@ -74,7 +74,7 @@ func (r rebuilder) Rebuild(srcs []string) error {
 	wg.Wait()
 
 	if errs.Err() != nil {
-		return fmt.Errorf("rebuild failed %w", errs)
+		return fmt.Errorf("rebuild failed, %w", errs)
 	}
 
 	level.Info(r.logger).Log("msg", "cache built", "took", time.Since(now))
@@ -86,7 +86,7 @@ func (r rebuilder) Rebuild(srcs []string) error {
 func (r rebuilder) rebuild(src, dst string) (err error) {
 	src, err = filepath.Abs(filepath.Clean(src))
 	if err != nil {
-		return fmt.Errorf("clean source path %w", err)
+		return fmt.Errorf("clean source path, %w", err)
 	}
 
 	pr, pw := io.Pipe()
@@ -101,7 +101,7 @@ func (r rebuilder) rebuild(src, dst string) (err error) {
 		written, err := r.a.Create([]string{src}, pw)
 		if err != nil {
-			if err := pw.CloseWithError(fmt.Errorf("archive write, pipe writer failed %w", err)); err != nil {
+			if err := pw.CloseWithError(fmt.Errorf("archive write, pipe writer failed, %w", err)); err != nil {
 				level.Error(r.logger).Log("msg", "pw close", "err", err)
 			}
 		}
@@ -115,7 +115,7 @@ func (r rebuilder) rebuild(src, dst string) (err error) {
 	tr := io.TeeReader(pr, sw)
 	if err := r.s.Put(dst, tr); err != nil {
-		err = fmt.Errorf("upload file, pipe reader failed %w", err)
+		err = fmt.Errorf("upload file, pipe reader failed, %w", err)
 		if err := pr.CloseWithError(err); err != nil {
 			level.Error(r.logger).Log("msg", "pr close", "err", err)
 		}
diff --git a/cache/restorer.go b/cache/restorer.go
index e3bb3add..4895e1a0 100644
--- a/cache/restorer.go
+++ b/cache/restorer.go
@@ -39,7 +39,7 @@ func (r restorer) Restore(dsts []string) error {
 
 	key, err := r.generateKey()
 	if err != nil {
-		return fmt.Errorf("generate key %w", err)
+		return fmt.Errorf("generate key, %w", err)
 	}
 
 	var (
@@ -59,7 +59,7 @@ func (r restorer) Restore(dsts []string) error {
 			defer wg.Done()
 
 			if err := r.restore(src, dst); err != nil {
-				errs.Add(fmt.Errorf("download from <%s> to <%s> %w", src, dst, err))
+				errs.Add(fmt.Errorf("download from <%s> to <%s>, %w", src, dst, err))
 			}
 		}(src, dst)
 	}
@@ -67,7 +67,7 @@ func (r restorer) Restore(dsts []string) error {
 	wg.Wait()
 
 	if errs.Err() != nil {
-		return fmt.Errorf("restore failed %w", errs)
+		return fmt.Errorf("restore failed, %w", errs)
 	}
 
 	level.Info(r.logger).Log("msg", "cache restored", "took", time.Since(now))
@@ -86,7 +86,7 @@ func (r restorer) restore(src, dst string) (err error) {
 		level.Info(r.logger).Log("msg", "downloading archived directory", "remote", src, "local", dst)
 
 		if err := r.s.Get(src, pw); err != nil {
-			if err := pw.CloseWithError(fmt.Errorf("get file from storage backend, pipe writer failed %w", err)); err != nil {
+			if err := pw.CloseWithError(fmt.Errorf("get file from storage backend, pipe writer failed, %w", err)); err != nil {
 				level.Error(r.logger).Log("msg", "pw close", "err", err)
 			}
 		}
@@ -96,7 +96,7 @@ func (r restorer) restore(src, dst string) (err error) {
 	written, err := r.a.Extract(dst, pr)
 	if err != nil {
-		err = fmt.Errorf("extract files from downloaded archive, pipe reader failed %w", err)
+		err = fmt.Errorf("extract files from downloaded archive, pipe reader failed, %w", err)
 		if err := pr.CloseWithError(err); err != nil {
 			level.Error(r.logger).Log("msg", "pr close", "err", err)
 		}
diff --git a/internal/io.go b/internal/io.go
index 62f3e840..85d60707 100644
--- a/internal/io.go
+++ b/internal/io.go
@@ -21,7 +21,7 @@ func CloseWithErrLogf(logger log.Logger, closer io.Closer, format string, a ...i
 		logger = log.NewLogfmtLogger(os.Stderr)
 	}
 
-	level.Warn(logger).Log("msg", "detected close error", "err", fmt.Errorf(format+" %w", append(a, err)...))
+	level.Warn(logger).Log("msg", "detected close error", "err", fmt.Errorf(format+", %w", append(a, err)...))
 }
 
 // CloseWithErrCapturef runs function and on error return error by argument including the given error..
@@ -34,7 +34,7 @@ func CloseWithErrCapturef(err *error, closer io.Closer, format string, a ...inte
 		mErr := MultiError{}
 		mErr.Add(*err)
-		mErr.Add(fmt.Errorf(format+" %w", append(a, cErr)...))
+		mErr.Add(fmt.Errorf(format+", %w", append(a, cErr)...))
 		*err = mErr.Err()
 
 		return
diff --git a/internal/plugin/plugin.go b/internal/plugin/plugin.go
index 33c702f5..d58c10e2 100644
--- a/internal/plugin/plugin.go
+++ b/internal/plugin/plugin.go
@@ -69,7 +69,7 @@ func (p *Plugin) Exec() error {
 	} else {
 		workspace, err := os.Getwd()
 		if err != nil {
-			return fmt.Errorf("get working directory %w", err)
+			return fmt.Errorf("get working directory, %w", err)
 		}
 
 		localRoot = workspace
@@ -86,7 +86,7 @@ func (p *Plugin) Exec() error {
 	if cfg.CacheKeyTemplate != "" {
 		generator = keygen.NewMetadata(p.logger, cfg.CacheKeyTemplate, p.Metadata)
 		if err := generator.Check(); err != nil {
-			return fmt.Errorf("parse failed, falling back to default %w", err)
+			return fmt.Errorf("parse failed, falling back to default, %w", err)
 		}
 
 		options = append(options, cache.WithFallbackGenerator(keygen.NewHash(p.Metadata.Commit.Branch)))
@@ -105,7 +105,7 @@ func (p *Plugin) Exec() error {
 		SFTP:       cfg.SFTP,
 	})
 	if err != nil {
-		return fmt.Errorf("initialize backend <%s> %w", cfg.Backend, err)
+		return fmt.Errorf("initialize backend <%s>, %w", cfg.Backend, err)
 	}
 
 	// 3. Initialize cache.
diff --git a/internal/plugin/plugin_test.go b/internal/plugin/plugin_test.go
index c7aade46..ad3744bd 100644
--- a/internal/plugin/plugin_test.go
+++ b/internal/plugin/plugin_test.go
@@ -235,7 +235,7 @@ func newPlugin(c *Config) Plugin {
 		Metadata: metadata.Metadata{
 			Repo: metadata.Repo{
 				Branch: "master",
-				Name:   "drone-cache",
+				Name:   repoName,
 			},
 			Commit: metadata.Commit{
 				Branch: "master",
diff --git a/key/generator/hash.go b/key/generator/hash.go
index caab155e..cbe2bdc2 100644
--- a/key/generator/hash.go
+++ b/key/generator/hash.go
@@ -20,7 +20,7 @@ func NewHash(defaultParts ...string) *Hash {
 func (h *Hash) Generate(parts ...string) (string, error) {
 	key, err := hash(append(parts, h.defaultParts...)...)
 	if err != nil {
-		return "", fmt.Errorf("generate hash key for mounted %w", err)
+		return "", fmt.Errorf("generate hash key for mounted, %w", err)
 	}
 
 	return key, nil
diff --git a/key/generator/metadata.go b/key/generator/metadata.go
index ed5f2ec2..a7a4fd88 100644
--- a/key/generator/metadata.go
+++ b/key/generator/metadata.go
@@ -53,14 +53,14 @@ func (g *Metadata) Generate(_ ...string) (string, error) {
 
 	t, err := g.parseTemplate()
 	if err != nil {
-		return "", fmt.Errorf("parse, <%s> as cache key template, falling back to default %w", g.tmpl, err)
+		return "", fmt.Errorf("parse, <%s> as cache key template, falling back to default, %w", g.tmpl, err)
 	}
 
 	var b strings.Builder
 
 	err = t.Execute(&b, g.data)
 	if err != nil {
-		return "", fmt.Errorf("build, <%s> as cache key, falling back to default %w", g.tmpl, err)
+		return "", fmt.Errorf("build, <%s> as cache key, falling back to default, %w", g.tmpl, err)
 	}
 
 	return b.String(), nil
diff --git a/key/generator/util.go b/key/generator/util.go
index 42592334..3b248872 100644
--- a/key/generator/util.go
+++ b/key/generator/util.go
@@ -13,7 +13,7 @@ func readerHasher(readers ...io.Reader) (string, error) {
 
 	for _, r := range readers {
 		if _, err := io.Copy(h, r); err != nil {
-			return "", fmt.Errorf("write reader as hash %w", err)
+			return "", fmt.Errorf("write reader as hash, %w", err)
 		}
 	}
 
diff --git a/storage/backend/azure/azure.go b/storage/backend/azure/azure.go
index 2be21b7e..8b255615 100644
--- a/storage/backend/azure/azure.go
+++ b/storage/backend/azure/azure.go
@@ -38,7 +38,7 @@ func New(l log.Logger, c Config) (*Backend, error) {
 	// 2. Create a default request pipeline using your storage account name and account key.
 	credential, err := azblob.NewSharedKeyCredential(c.AccountName, c.AccountKey)
 	if err != nil {
-		return nil, fmt.Errorf("azure, invalid credentials %w", err)
+		return nil, fmt.Errorf("azure, invalid credentials, %w", err)
 	}
 
 	// 3. Azurite has different URL pattern than production Azure Blob Storage.
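One caveat about the hunk that follows: the type assertion err.(azblob.StorageError) inspects only the top-level error, so it would miss a cause that has been wrapped with %w. An unwrap-aware variant — a sketch only, assuming the vendored github.com/Azure/azure-storage-blob-go/azblob package and the standard errors package — could look like:

	// containerAlreadyExists reports whether err (possibly wrapped via %w)
	// is the azblob "ContainerAlreadyExists" service error.
	func containerAlreadyExists(err error) bool {
		var stgErr azblob.StorageError
		return errors.As(err, &stgErr) &&
			stgErr.ServiceCode() == azblob.ServiceCodeContainerAlreadyExists
	}

Here the existing assertion is fine as-is, since the error comes straight from the azblob client; the unwrap-aware form only matters once wrapped errors start crossing this boundary.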
@@ -64,7 +64,7 @@ func New(l log.Logger, c Config) (*Backend, error) {
 	if err != nil {
 		ret, ok := err.(azblob.StorageError)
 		if !ok {
-			return nil, fmt.Errorf("azure, unexpected error %w", err)
+			return nil, fmt.Errorf("azure, unexpected error, %w", err)
 		}
 
 		if ret.ServiceCode() == "ContainerAlreadyExists" {
@@ -86,7 +86,7 @@ func (b *Backend) Get(ctx context.Context, p string, w io.Writer) (err error) {
 		resp, err := blobURL.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false)
 		if err != nil {
-			errCh <- fmt.Errorf("get the object %w", err)
+			errCh <- fmt.Errorf("get the object, %w", err)
 			return
 		}
@@ -95,7 +95,7 @@ func (b *Backend) Get(ctx context.Context, p string, w io.Writer) (err error) {
 
 		_, err = io.Copy(w, rc)
 		if err != nil {
-			errCh <- fmt.Errorf("copy the object %w", err)
+			errCh <- fmt.Errorf("copy the object, %w", err)
 		}
 	}()
@@ -118,7 +118,7 @@ func (b *Backend) Put(ctx context.Context, p string, r io.Reader) error {
 			MaxBuffers: defaultMaxBuffers,
 		},
 	); err != nil {
-		return fmt.Errorf("put the object %w", err)
+		return fmt.Errorf("put the object, %w", err)
 	}
 
 	return nil
diff --git a/storage/backend/backend.go b/storage/backend/backend.go
index d1401b02..0ce775fd 100644
--- a/storage/backend/backend.go
+++ b/storage/backend/backend.go
@@ -82,7 +82,7 @@ func FromConfig(l log.Logger, backedType string, cfg Config) (Backend, error) {
 	}
 
 	if err != nil {
-		return nil, fmt.Errorf("initialize backend %w", err)
+		return nil, fmt.Errorf("initialize backend, %w", err)
 	}
 
 	return b, nil
diff --git a/storage/backend/filesystem/filesystem.go b/storage/backend/filesystem/filesystem.go
index b9157723..ed27d310 100644
--- a/storage/backend/filesystem/filesystem.go
+++ b/storage/backend/filesystem/filesystem.go
@@ -31,7 +31,7 @@ func New(l log.Logger, c Config) (*Backend, error) {
 
 	//nolint: TODO(kakkoyun): Should it be created?
 	if _, err := os.Stat(c.CacheRoot); err != nil {
-		return nil, fmt.Errorf("make sure volume is mounted, <%s> as cache root %w", c.CacheRoot, err)
+		return nil, fmt.Errorf("make sure volume is mounted, <%s> as cache root, %w", c.CacheRoot, err)
 	}
 
 	level.Debug(l).Log("msg", "Filesystem backend", "config", fmt.Sprintf("%#v", c))
@@ -43,7 +43,7 @@ func New(l log.Logger, c Config) (*Backend, error) {
 func (b *Backend) Get(ctx context.Context, p string, w io.Writer) error {
 	path, err := filepath.Abs(filepath.Clean(filepath.Join(b.cacheRoot, p)))
 	if err != nil {
-		return fmt.Errorf("absolute path %w", err)
+		return fmt.Errorf("absolute path, %w", err)
 	}
 
 	errCh := make(chan error)
@@ -53,7 +53,7 @@ func (b *Backend) Get(ctx context.Context, p string, w io.Writer) error {
 
 		rc, err := os.Open(path)
 		if err != nil {
-			errCh <- fmt.Errorf("get the object %w", err)
+			errCh <- fmt.Errorf("get the object, %w", err)
 			return
 		}
 
@@ -61,7 +61,7 @@ func (b *Backend) Get(ctx context.Context, p string, w io.Writer) error {
 		_, err = io.Copy(w, rc)
 		if err != nil {
-			errCh <- fmt.Errorf("copy the object %w", err)
+			errCh <- fmt.Errorf("copy the object, %w", err)
 		}
 	}()
diff --git a/storage/backend/gcs/gcs.go b/storage/backend/gcs/gcs.go
index 092a11a1..cc2ebeed 100644
--- a/storage/backend/gcs/gcs.go
+++ b/storage/backend/gcs/gcs.go
@@ -51,7 +51,7 @@ func New(l log.Logger, c Config) (*Backend, error) {
 
 	client, err := gcstorage.NewClient(ctx, opts...)
 	if err != nil {
-		return nil, fmt.Errorf("gcs client initialization %w", err)
+		return nil, fmt.Errorf("gcs client initialization, %w", err)
 	}
 
 	return &Backend{
@@ -79,7 +79,7 @@ func (b *Backend) Get(ctx context.Context, p string, w io.Writer) error {
 
 		r, err := obj.NewReader(ctx)
 		if err != nil {
-			errCh <- fmt.Errorf("get the object %w", err)
+			errCh <- fmt.Errorf("get the object, %w", err)
 			return
 		}
 
@@ -87,7 +87,7 @@ func (b *Backend) Get(ctx context.Context, p string, w io.Writer) error {
 		_, err = io.Copy(w, r)
 		if err != nil {
-			errCh <- fmt.Errorf("copy the object %w", err)
+			errCh <- fmt.Errorf("copy the object, %w", err)
 		}
 	}()
@@ -118,16 +118,16 @@ func (b *Backend) Put(ctx context.Context, p string, r io.Reader) error {
 
 		_, err := io.Copy(w, r)
 		if err != nil {
-			errCh <- fmt.Errorf("copy the object %w", err)
+			errCh <- fmt.Errorf("copy the object, %w", err)
 		}
 
 		if err := w.Close(); err != nil {
-			errCh <- fmt.Errorf("close the object %w", err)
+			errCh <- fmt.Errorf("close the object, %w", err)
 		}
 
 		if b.acl != "" {
 			if err := obj.ACL().Set(ctx, gcstorage.AllAuthenticatedUsers, gcstorage.ACLRole(b.acl)); err != nil {
-				errCh <- fmt.Errorf("set ACL of the object %w", err)
+				errCh <- fmt.Errorf("set ACL of the object, %w", err)
 			}
 		}
 	}()
diff --git a/storage/backend/s3/s3.go b/storage/backend/s3/s3.go
index ec397189..061c08e0 100644
--- a/storage/backend/s3/s3.go
+++ b/storage/backend/s3/s3.go
@@ -72,7 +72,7 @@ func (b *Backend) Get(ctx context.Context, p string, w io.Writer) error {
 
 		out, err := b.client.GetObjectWithContext(ctx, in)
 		if err != nil {
-			errCh <- fmt.Errorf("get the object %w", err)
+			errCh <- fmt.Errorf("get the object, %w", err)
 			return
 		}
 
@@ -80,7 +80,7 @@ func (b *Backend) Get(ctx context.Context, p string, w io.Writer) error {
 		_, err = io.Copy(w, out.Body)
 		if err != nil {
-			errCh <- fmt.Errorf("copy the object %w", err)
+			errCh <- fmt.Errorf("copy the object, %w", err)
 		}
 	}()
@@ -109,7 +109,7 @@ func (b *Backend) Put(ctx context.Context, p string, r io.Reader) error {
 	}
 
 	if _, err := uploader.UploadWithContext(ctx, in); err != nil {
-		return fmt.Errorf("put the object %w", err)
+		return fmt.Errorf("put the object, %w", err)
 	}
 
 	return nil
diff --git a/storage/backend/sftp/sftp.go b/storage/backend/sftp/sftp.go
index 44cbad49..922facc8 100644
--- a/storage/backend/sftp/sftp.go
+++ b/storage/backend/sftp/sftp.go
@@ -28,7 +28,7 @@ type Backend struct {
 func New(l log.Logger, c Config) (*Backend, error) {
 	authMethod, err := authMethod(c)
 	if err != nil {
-		return nil, fmt.Errorf("unable to get ssh auth method %w", err)
+		return nil, fmt.Errorf("unable to get ssh auth method, %w", err)
 	}
 
 	/* #nosec */
@@ -39,18 +39,18 @@ func New(l log.Logger, c Config) (*Backend, error) {
 		Timeout: c.Timeout,
 	})
 	if err != nil {
-		return nil, fmt.Errorf("unable to connect to ssh %w", err)
+		return nil, fmt.Errorf("unable to connect to ssh, %w", err)
 	}
 
 	client, err := sftp.NewClient(sshClient)
 	if err != nil {
 		sshClient.Close()
-		return nil, fmt.Errorf("unable to connect to ssh with sftp protocol %w", err)
+		return nil, fmt.Errorf("unable to connect to ssh with sftp protocol, %w", err)
 	}
 
 	//nolint: TODO(kakkoyun): Should it be created?
 	if _, err := client.Stat(c.CacheRoot); err != nil {
-		return nil, fmt.Errorf("make sure cache root <%s> created %w", c.CacheRoot, err)
+		return nil, fmt.Errorf("make sure cache root <%s> created, %w", c.CacheRoot, err)
 	}
 
 	level.Debug(l).Log("msg", "sftp backend", "config", fmt.Sprintf("%#v", c))
@@ -62,7 +62,7 @@ func New(l log.Logger, c Config) (*Backend, error) {
 func (b *Backend) Get(ctx context.Context, p string, w io.Writer) error {
 	path, err := filepath.Abs(filepath.Clean(filepath.Join(b.cacheRoot, p)))
 	if err != nil {
-		return fmt.Errorf("generate absolute path %w", err)
+		return fmt.Errorf("generate absolute path, %w", err)
 	}
 
 	errCh := make(chan error)
@@ -72,7 +72,7 @@ func (b *Backend) Get(ctx context.Context, p string, w io.Writer) error {
 
 		rc, err := b.client.Open(path)
 		if err != nil {
-			errCh <- fmt.Errorf("get the object %w", err)
+			errCh <- fmt.Errorf("get the object, %w", err)
 			return
 		}
 
@@ -80,7 +80,7 @@ func (b *Backend) Get(ctx context.Context, p string, w io.Writer) error {
 		_, err = io.Copy(w, rc)
 		if err != nil {
-			errCh <- fmt.Errorf("copy the object %w", err)
+			errCh <- fmt.Errorf("copy the object, %w", err)
 		}
 	}()
@@ -96,31 +96,30 @@ func (b *Backend) Get(ctx context.Context, p string, w io.Writer) error {
 func (b *Backend) Put(ctx context.Context, p string, r io.Reader) error {
 	errCh := make(chan error)
 
-	go func() {
-		defer close(errCh)
+	path := filepath.Clean(filepath.Join(b.cacheRoot, p))
 
-		path := filepath.Clean(filepath.Join(b.cacheRoot, p))
+	dir := filepath.Dir(path)
+	if err := b.client.MkdirAll(dir); err != nil {
+		return fmt.Errorf("create directory, %w", err)
+	}
 
-		dir := filepath.Dir(path)
-		if err := b.client.MkdirAll(dir); err != nil {
-			errCh <- fmt.Errorf("create directory %w", err)
-			return
-		}
+	go func() {
+		defer close(errCh)
 
 		w, err := b.client.Create(path)
 		if err != nil {
-			errCh <- fmt.Errorf("create cache file %w", err)
+			errCh <- fmt.Errorf("create cache file, %w", err)
 			return
 		}
 
 		defer internal.CloseWithErrLogf(b.logger, w, "writer close defer")
 
 		if _, err := io.Copy(w, r); err != nil {
-			errCh <- fmt.Errorf("write contents of reader to a file %w", err)
+			errCh <- fmt.Errorf("write contents of reader to a file, %w", err)
 		}
 
 		if err := w.Close(); err != nil {
-			errCh <- fmt.Errorf("close the object %w", err)
+			errCh <- fmt.Errorf("close the object, %w", err)
 		}
 	}()
@@ -149,12 +148,12 @@ func authMethod(c Config) ([]ssh.AuthMethod, error) {
 func readPublicKeyFile(file string) (ssh.AuthMethod, error) {
 	buffer, err := ioutil.ReadFile(file)
 	if err != nil {
-		return nil, fmt.Errorf("unable to read file %w", err)
+		return nil, fmt.Errorf("unable to read file, %w", err)
 	}
 
 	key, err := ssh.ParsePrivateKey(buffer)
 	if err != nil {
-		return nil, fmt.Errorf("unable to parse private key %w", err)
+		return nil, fmt.Errorf("unable to parse private key, %w", err)
 	}
 
 	return ssh.PublicKeys(key), nil

From 294ce47ee084e9de0416f01ce3956d9b94577daa Mon Sep 17 00:00:00 2001
From: Kemal Akkoyun
Date: Sun, 5 Apr 2020 17:48:28 +0200
Subject: [PATCH 14/16] Stabilize CI

---
 .drone.yml                               |  6 +++---
 Makefile                                 |  2 +-
 archive/tar/tar.go                       |  2 +-
 docker-compose.yml                       |  7 +++++--
 internal/plugin/plugin_test.go           |  8 ++++----
 storage/backend/filesystem/filesystem.go | 18 +++++++++---------
 storage/backend/sftp/sftp.go             | 15 ++++++++------
 storage/backend/sftp/sftp_test.go        |  2 +-
 8 files changed, 32 insertions(+), 28 deletions(-)

diff --git a/.drone.yml b/.drone.yml
index 48c5f0ba..b9757637 100644
--- a/.drone.yml
+++ b/.drone.yml
@@ -41,7 +41,7 @@ steps:
 - name: test
   image: golang:1.14.1-alpine3.11
   commands:
-    - go test -mod=vendor -short -cover -tags=integration ./...
+    - go test -mod=vendor -short -cover -failfast -tags=integration ./...
   environment:
     CGO_ENABLED: 0
    TEST_S3_ENDPOINT: minio:9000
@@ -290,11 +290,11 @@ services:
   commands:
     - fake-gcs-server -public-host fakegcs -scheme http
 - name: sftp
-  image: atmoz/sftp
+  image: atmoz/sftp:alpine
   ports:
     - 22
   commands:
-    - /entrypoint foo:pass:::upload
+    - /entrypoint foo:pass:::sftp_test bar:pass:::plugin_test
 - name: azurite
   image: mcr.microsoft.com/azure-storage/azurite
   commands:
diff --git a/Makefile b/Makefile
index 22633b04..da268c4f 100644
--- a/Makefile
+++ b/Makefile
@@ -97,7 +97,7 @@ container-push-dev: container-dev
 .PHONY: test
 test: $(GOTEST_BIN)
 	docker-compose up -d && sleep 1
-	-$(GOTEST_BIN) -failfast -race -short -tags=integration ./...
+	-$(GOTEST_BIN) -race -short -cover -failfast -tags=integration ./...
 	docker-compose down -v
 
 .PHONY: test-integration
diff --git a/archive/tar/tar.go b/archive/tar/tar.go
index b612fdfd..18f16d7d 100644
--- a/archive/tar/tar.go
+++ b/archive/tar/tar.go
@@ -171,7 +171,7 @@ func (a *Archive) Extract(dst string, r io.Reader) (int64, error) {
 		case err == io.EOF: // if no more files are found return
 			return written, nil
 		case err != nil: // return any other error
-			return written, fmt.Errorf("tar reader %v: %w", err, ErrArchiveNotReadable)
+			return written, fmt.Errorf("tar reader <%v>, %w", err, ErrArchiveNotReadable)
 		case h == nil: // if the header is nil, skip it
 			continue
 		}
diff --git a/docker-compose.yml b/docker-compose.yml
index 91b2df02..901aab82 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -15,10 +15,13 @@ services:
       - 4443:4443
     command: -public-host localhost -scheme http
   sftp:
-    image: atmoz/sftp
+    image: atmoz/sftp:alpine
     ports:
       - "22:22"
-    command: foo:pass:::upload
+    volumes:
+      - ./testdata/sftp_test:/home/foo/sftp_test
+      - ./testdata/plugin_test:/home/bar/plugin_test
+    command: foo:pass:::sftp_test bar:pass:::plugin_test
   azurite:
     image: mcr.microsoft.com/azure-storage/azurite
     ports:
diff --git a/internal/plugin/plugin_test.go b/internal/plugin/plugin_test.go
index ad3744bd..25348e7e 100644
--- a/internal/plugin/plugin_test.go
+++ b/internal/plugin/plugin_test.go
@@ -41,6 +41,7 @@ const (
 	testRootMoved                  = "testdata/moved"
 	defaultStorageOperationTimeout = 5 * time.Second
 	defaultPublicHost              = "localhost:4443"
+	repoName                       = "drone-cache"
 )
 
 var publicHost = getEnv("TEST_STORAGE_EMULATOR_HOST", defaultPublicHost)
@@ -423,9 +424,9 @@ func setupSFTP(t *testing.T, c *Config, name string) {
 	const (
 		defaultSFTPHost  = "127.0.0.1"
 		defaultSFTPPort  = "22"
-		defaultUsername  = "foo"
+		defaultUsername  = "bar"
 		defaultPassword  = "pass"
-		defaultCacheRoot = "/upload"
+		defaultCacheRoot = "/plugin_test"
 	)
 
 	var (
@@ -447,8 +448,7 @@ func setupSFTP(t *testing.T, c *Config, name string) {
 	client, err := pkgsftp.NewClient(sshClient)
 	test.Ok(t, err)
 
-	test.Ok(t, client.MkdirAll(cacheRoot))
-	t.Cleanup(func() { client.RemoveDirectory(cacheRoot) })
+	test.Ok(t, client.MkdirAll(filepath.Join(cacheRoot, repoName)))
 
 	c.Backend = backend.SFTP
 	c.SFTP = sftp.Config{
diff --git a/storage/backend/filesystem/filesystem.go b/storage/backend/filesystem/filesystem.go
index ed27d310..e87a016a 100644
--- a/storage/backend/filesystem/filesystem.go
+++ b/storage/backend/filesystem/filesystem.go
@@ -26,16 +26,16 @@ type Backend struct {
 
 // New creates a Backend backend.
 func New(l log.Logger, c Config) (*Backend, error) {
 	if strings.TrimRight(path.Clean(c.CacheRoot), "/") == "" {
-		return nil, fmt.Errorf("empty or root path given, <%s> as cache root, ", c.CacheRoot)
+		return nil, fmt.Errorf("empty or root path given, <%s> as cache root", c.CacheRoot)
 	}
 
+	level.Debug(l).Log("msg", "Filesystem backend", "config", fmt.Sprintf("%#v", c))
+
 	//nolint: TODO(kakkoyun): Should it be created?
 	if _, err := os.Stat(c.CacheRoot); err != nil {
 		return nil, fmt.Errorf("make sure volume is mounted, <%s> as cache root, %w", c.CacheRoot, err)
 	}
 
-	level.Debug(l).Log("msg", "Filesystem backend", "config", fmt.Sprintf("%#v", c))
-
 	return &Backend{logger: l, cacheRoot: c.CacheRoot}, nil
 }
@@ -62,6 +62,7 @@ func (b *Backend) Get(ctx context.Context, p string, w io.Writer) error {
 		_, err = io.Copy(w, rc)
 		if err != nil {
 			errCh <- fmt.Errorf("copy the object, %w", err)
+			return
 		}
 	}()
@@ -77,7 +78,7 @@ func (b *Backend) Get(ctx context.Context, p string, w io.Writer) error {
 func (b *Backend) Put(ctx context.Context, p string, r io.Reader) error {
 	path, err := filepath.Abs(filepath.Clean(filepath.Join(b.cacheRoot, p)))
 	if err != nil {
-		return fmt.Errorf("build path %w", err)
+		return fmt.Errorf("build path, %w", err)
 	}
 
 	errCh := make(chan error)
@@ -87,24 +88,24 @@ func (b *Backend) Put(ctx context.Context, p string, r io.Reader) error {
 
 		dir := filepath.Dir(path)
 		if err := os.MkdirAll(dir, os.FileMode(defaultFileMode)); err != nil {
-			errCh <- fmt.Errorf("create directory %w", err)
+			errCh <- fmt.Errorf("create directory, %w", err)
 			return
 		}
 
 		w, err := os.Create(path)
 		if err != nil {
-			errCh <- fmt.Errorf("create cache file %w", err)
+			errCh <- fmt.Errorf("create cache file, %w", err)
 			return
 		}
 
 		defer internal.CloseWithErrLogf(b.logger, w, "file writer, close defer")
 
 		if _, err := io.Copy(w, r); err != nil {
-			errCh <- fmt.Errorf("write contents of reader to a file %w", err)
+			errCh <- fmt.Errorf("write contents of reader to a file, %w", err)
 		}
 
 		if err := w.Close(); err != nil {
-			errCh <- fmt.Errorf("close the object %w", err)
+			errCh <- fmt.Errorf("close the object, %w", err)
 		}
 	}()
diff --git a/storage/backend/sftp/sftp.go b/storage/backend/sftp/sftp.go
index 922facc8..f159e4a3 100644
--- a/storage/backend/sftp/sftp.go
+++ b/storage/backend/sftp/sftp.go
@@ -96,16 +96,17 @@ func (b *Backend) Get(ctx context.Context, p string, w io.Writer) error {
 func (b *Backend) Put(ctx context.Context, p string, r io.Reader) error {
 	errCh := make(chan error)
 
-	path := filepath.Clean(filepath.Join(b.cacheRoot, p))
-
-	dir := filepath.Dir(path)
-	if err := b.client.MkdirAll(dir); err != nil {
-		return fmt.Errorf("create directory, %w", err)
-	}
-
 	go func() {
 		defer close(errCh)
 
+		path := filepath.Clean(filepath.Join(b.cacheRoot, p))
+
+		dir := filepath.Dir(path)
+		if err := b.client.MkdirAll(dir); err != nil {
+			errCh <- fmt.Errorf("create directory, %w", err)
+			return
+		}
+
 		w, err := b.client.Create(path)
 		if err != nil {
 			errCh <- fmt.Errorf("create cache file, %w", err)
diff --git a/storage/backend/sftp/sftp_test.go b/storage/backend/sftp/sftp_test.go
index 51f3ad6e..0f80bfc1 100644
--- a/storage/backend/sftp/sftp_test.go
+++ b/storage/backend/sftp/sftp_test.go
@@ -20,7 +20,7 @@ const (
 	defaultSFTPPort  = "22"
 	defaultUsername  = "foo"
 	defaultPassword  = "pass"
-	defaultCacheRoot = "/upload"
+	defaultCacheRoot = "/sftp_test"
 )
 
 var (

From 5164d71c9e5ce3bac50f40c5173eb678136fa7ee Mon Sep 17 00:00:00 2001
From: Kemal Akkoyun
Date: Sun, 5 Apr 2020 19:48:00 +0200
Subject: [PATCH 15/16] Upgrade goreleaser to latest

---
 .drone.yml | 6 +++---
 Makefile   | 4 ++--
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/.drone.yml b/.drone.yml
index b9757637..bec9044a 100644
--- a/.drone.yml
+++ b/.drone.yml
@@ -54,7 +54,7 @@ steps:
     path: /drone/src/tmp/testdata/cache
 
 - name: release-snapshot-dev
-  image: goreleaser/goreleaser:v0.120
+  image: goreleaser/goreleaser:v0.131.1
   commands:
     - apk add --update make upx
     - goreleaser release --rm-dist --snapshot
@@ -151,7 +151,7 @@ steps:
     path: /tmp/cache
 
 - name: release-snapshot
-  image: goreleaser/goreleaser:v0.120
+  image: goreleaser/goreleaser:v0.131.1
  commands:
     - apk add --update make upx
     - goreleaser release --rm-dist --snapshot
@@ -330,7 +330,7 @@ steps:
     - git fetch --tags
 
 - name: release
-  image: goreleaser/goreleaser:v0.120
+  image: goreleaser/goreleaser:v0.131.1
   commands:
     - apk add --update make upx
     - make release
diff --git a/Makefile b/Makefile
index da268c4f..f072fdb4 100644
--- a/Makefile
+++ b/Makefile
@@ -10,7 +10,7 @@ GOLANGCI_LINT_VERSION=v1.21.0
 GOLANGCI_LINT_BIN=$(GOPATH)/bin/golangci-lint
 EMBEDMD_BIN=$(GOPATH)/bin/embedmd
 GOTEST_BIN=$(GOPATH)/bin/gotest
-GORELEASER_VERSION=v0.120
+GORELEASER_VERSION=v0.131.1
 GORELEASER_BIN=$(GOPATH)/bin/goreleaser
 LICHE_BIN=$(GOPATH)/bin/liche
 
@@ -142,7 +142,7 @@ $(GOLANGCI_LINT_BIN):
 
 $(GORELEASER_BIN):
 	curl -sfL https://install.goreleaser.com/github.com/goreleaser/goreleaser.sh \
-		| VERSION=${GORELEASER_VERSION} sh -s -- -b $(GOPATH)/bin $(GORELEASER_BIN)
+		| VERSION=${GORELEASER_VERSION} sh -s -- -b $(GOPATH)/bin ${GORELEASER_VERSION}
 
 ${LICHE_BIN}:
 	GO111MODULE=on go get -u github.com/raviqqe/liche

From 718e540aa1401d14d74e310e6b00b4baa3b21089 Mon Sep 17 00:00:00 2001
From: Kemal Akkoyun
Date: Sun, 5 Apr 2020 19:48:15 +0200
Subject: [PATCH 16/16] Fix minor flag issues

---
 main.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/main.go b/main.go
index 8b9d3f98..1253778b 100644
--- a/main.go
+++ b/main.go
@@ -280,7 +280,7 @@ func main() {
 		&cli.BoolFlag{
 			Name:    "debug, d",
 			Usage:   "debug",
-			EnvVars: []string{"PLUGIN_DEBUG, DEBUG"},
+			EnvVars: []string{"PLUGIN_DEBUG", "DEBUG"},
 		},
 		&cli.BoolFlag{
 			Name: "exit-code, ex",
@@ -512,7 +512,7 @@ func run(c *cli.Context) error {
 
 		StorageOperationTimeout: c.Duration("backend.operation-timeout"),
 		FileSystem: filesystem.Config{
-			CacheRoot: c.String("filesystem-cache-root"),
+			CacheRoot: c.String("filesystem.cache-root"),
 		},
 		S3: s3.Config{
 			ACL: c.String("acl"),