diff --git a/miniogw/gateway.go b/miniogw/gateway.go index 0556b5c..c90c746 100644 --- a/miniogw/gateway.go +++ b/miniogw/gateway.go @@ -635,7 +635,9 @@ func (layer *gatewayLayer) PutObject(ctx context.Context, bucket, object string, func (layer *gatewayLayer) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo minio.ObjectInfo, srcOpts, destOpts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) { defer mon.Task()(&ctx)(&err) - if layer.compatibilityConfig.DisableCopyObject { + srcAndDestSame := srcBucket == destBucket && srcObject == destObject + + if layer.compatibilityConfig.DisableCopyObject && !srcAndDestSame { // Note: In production Gateway-MT, we want to return Not Implemented until we implement server-side copy return minio.ObjectInfo{}, minio.NotImplemented{API: "CopyObject"} } @@ -674,10 +676,24 @@ func (layer *gatewayLayer) CopyObject(ctx context.Context, srcBucket, srcObject, } } - if srcBucket == destBucket && srcObject == destObject { - // Source and destination are the same. Do nothing, otherwise copying - // the same object over itself may destroy it, especially if it is a - // larger one. + if srcAndDestSame { + // Source and destination are the same. Do nothing, apart from ensuring + // metadata is updated. Tools like rclone sync use CopyObject with the + // same source and destination as a way of updating existing metadata + // like last modified date/time, as there's no S3 endpoint for + // manipulating existing object's metadata. 
+ info, err := project.StatObject(ctx, srcBucket, srcObject) + if err != nil { + return minio.ObjectInfo{}, convertError(err, srcBucket, srcObject) + } + + upsertObjectMetadata(srcInfo.UserDefined, info.Custom) + + err = project.UpdateObjectMetadata(ctx, srcBucket, srcObject, srcInfo.UserDefined, nil) + if err != nil { + return minio.ObjectInfo{}, convertError(err, srcBucket, srcObject) + } + return srcInfo, nil } @@ -697,28 +713,7 @@ func (layer *gatewayLayer) CopyObject(ctx context.Context, srcBucket, srcObject, info := download.Info() - // if X-Amz-Metadata-Directive header is provided and is set to "REPLACE", - // then srcInfo.UserDefined will contain new metadata from the copy request. - // If the directive is "COPY", or undefined, the source object's metadata - // will be contained in srcInfo.UserDefined instead. This is done by the - // caller handler in minio. - - // if X-Amz-Tagging-Directive is set to "REPLACE", we need to set the copied - // object's tags to the tags provided in the copy request. If the directive - // is "COPY", or undefined, copy over any existing tags from the source - // object. - if td, ok := srcInfo.UserDefined[xhttp.AmzTagDirective]; ok && td == "REPLACE" { - srcInfo.UserDefined["s3:tags"] = srcInfo.UserDefined[xhttp.AmzObjectTagging] - } else if tags, ok := info.Custom["s3:tags"]; ok { - srcInfo.UserDefined["s3:tags"] = tags - } - - delete(srcInfo.UserDefined, xhttp.AmzObjectTagging) - delete(srcInfo.UserDefined, xhttp.AmzTagDirective) - - // if X-Amz-Metadata-Directive header is set to "REPLACE", then - // srcInfo.UserDefined will be missing s3:etag, so make sure it's copied. 
- srcInfo.UserDefined["s3:etag"] = info.Custom["s3:etag"] + upsertObjectMetadata(srcInfo.UserDefined, info.Custom) err = upload.SetCustomMetadata(ctx, srcInfo.UserDefined) if err != nil { @@ -887,6 +882,31 @@ func projectFromContext(ctx context.Context, bucket, object string) (*uplink.Pro return pr, nil } +func upsertObjectMetadata(metadata map[string]string, existingMetadata uplink.CustomMetadata) { + // if X-Amz-Metadata-Directive header is provided and is set to "REPLACE", + // then srcInfo.UserDefined will contain new metadata from the copy request. + // If the directive is "COPY", or undefined, the source object's metadata + // will be contained in srcInfo.UserDefined instead. This is done by the + // caller handler in minio. + + // if X-Amz-Tagging-Directive is set to "REPLACE", we need to set the copied + // object's tags to the tags provided in the copy request. If the directive + // is "COPY", or undefined, copy over any existing tags from the source + // object. + if td, ok := metadata[xhttp.AmzTagDirective]; ok && td == "REPLACE" { + metadata["s3:tags"] = metadata[xhttp.AmzObjectTagging] + } else if tags, ok := existingMetadata["s3:tags"]; ok { + metadata["s3:tags"] = tags + } + + delete(metadata, xhttp.AmzObjectTagging) + delete(metadata, xhttp.AmzTagDirective) + + // if X-Amz-Metadata-Directive header is set to "REPLACE", then + // srcInfo.UserDefined will be missing s3:etag, so make sure it's copied. + metadata["s3:etag"] = existingMetadata["s3:etag"] +} + // checkBucketError will stat the bucket if the provided error is not nil, in // order to check if the proper error to return is really a bucket not found // error. 
If the satellite has already returned this error, do not make an diff --git a/testsuite/miniogw/gateway_test.go b/testsuite/miniogw/gateway_test.go index 2fe3634..9e508e6 100644 --- a/testsuite/miniogw/gateway_test.go +++ b/testsuite/miniogw/gateway_test.go @@ -734,6 +734,65 @@ func TestCopyObjectMetadata(t *testing.T) { }) } +func TestCopyObjectSameSourceAndDestUpdatesMetadata(t *testing.T) { + t.Parallel() + + runTest(t, func(t *testing.T, ctx context.Context, layer minio.ObjectLayer, project *uplink.Project) { + // Explicitly disable CopyObject, as we want to test that copying metadata + // for same source and destination works even if the endpoint is disabled + // as it's an exceptional case. + s3Compatibility := miniogw.S3CompatibilityConfig{ + DisableCopyObject: true, + IncludeCustomMetadataListing: true, + MaxKeysLimit: 1000, + } + + layer, err := miniogw.NewStorjGateway(s3Compatibility).NewGatewayLayer(auth.Credentials{}) + require.NoError(t, err) + + defer func() { require.NoError(t, layer.Shutdown(ctx)) }() + + // Create the source bucket using the Uplink API + testBucketInfo, err := project.CreateBucket(ctx, testBucket) + require.NoError(t, err) + + // Create the source object using the Uplink API + metadata := map[string]string{ + "content-type": "text/plain", + "mykey": "originalvalue", + "s3:etag": "123", + "s3:tags": "key=value", + } + _, err = createFile(ctx, project, testBucketInfo.Name, testFile, []byte("test"), metadata) + require.NoError(t, err) + + // Get the source object info using the Minio API + srcInfo, err := layer.GetObjectInfo(ctx, testBucket, testFile, minio.ObjectOptions{}) + require.NoError(t, err) + + // Copy the object. Metadata and tagging copied from source to destination. 
+		_, err = layer.CopyObject(ctx, testBucket, testFile, testBucket, testFile, srcInfo, minio.ObjectOptions{}, minio.ObjectOptions{})
+		require.NoError(t, err)
+		obj, err := project.StatObject(ctx, testBucketInfo.Name, testFile)
+		require.NoError(t, err)
+		require.EqualValues(t, metadata, obj.Custom)
+
+		// Copy the object with new metadata in request. This will be set on the copied object.
+		srcInfo.UserDefined = map[string]string{
+			"mykey": "newvalue",
+		}
+		_, err = layer.CopyObject(ctx, testBucket, testFile, testBucket, testFile, srcInfo, minio.ObjectOptions{}, minio.ObjectOptions{})
+		require.NoError(t, err)
+		obj, err = project.StatObject(ctx, testBucketInfo.Name, testFile)
+		require.NoError(t, err)
+		require.EqualValues(t, map[string]string{
+			"mykey":   "newvalue",
+			"s3:etag": metadata["s3:etag"],
+			"s3:tags": metadata["s3:tags"],
+		}, obj.Custom)
+	})
+}
+
 func TestDeleteObject(t *testing.T) {
 	t.Parallel()