diff options
author | Félix Sipma <felix+debian@gueux.org> | 2018-12-08 10:31:19 +0100 |
---|---|---|
committer | Félix Sipma <felix+debian@gueux.org> | 2018-12-08 10:31:19 +0100 |
commit | 70b5c53e64dae38106cf7e218cc99e6f493f12b9 (patch) | |
tree | 188d40029a4de9dcd6340222977941842d1be427 | |
parent | b95d64585c6015489ca8435e4dc422e111f25c67 (diff) |
New upstream version 6.0.11
33 files changed, 1843 insertions, 168 deletions
@@ -14,9 +14,9 @@ go get -u github.com/minio/minio-go ## Initialize Minio Client Minio client requires the following four parameters specified to connect to an Amazon S3 compatible object storage. -| Parameter | Description| +| Parameter | Description| | :--- | :--- | -| endpoint | URL to object storage service. | +| endpoint | URL to object storage service. | | accessKeyID | Access key is the user ID that uniquely identifies your account. | | secretAccessKey | Secret key is the password to your account. | | secure | Set this value to 'true' to enable secure (HTTPS) access. | @@ -85,8 +85,9 @@ func main() { } else { log.Fatalln(err) } + } else { + log.Printf("Successfully created %s\n", bucketName) } - log.Printf("Successfully created %s\n", bucketName) // Upload the zip file objectName := "golden-oldies.zip" @@ -106,7 +107,7 @@ func main() { ### Run FileUploader ```sh go run file-uploader.go -2016/08/13 17:03:28 Successfully created mymusic +2016/08/13 17:03:28 Successfully created mymusic 2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413 mc ls play/mymusic/ @@ -114,7 +115,7 @@ mc ls play/mymusic/ ``` ## API Reference -The full API Reference is available here. +The full API Reference is available here. * [Complete API Reference](https://docs.minio.io/docs/golang-client-api-reference) @@ -139,7 +140,7 @@ The full API Reference is available here. ### API Reference : File Object Operations * [`FPutObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject) -* [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject) +* [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FGetObject) * [`FPutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FPutObjectWithContext) * [`FGetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FGetObjectWithContext) @@ -154,6 +155,8 @@ The full API Reference is available here. 
* [`RemoveObject`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObject) * [`RemoveObjects`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObjects) * [`RemoveIncompleteUpload`](https://docs.minio.io/docs/golang-client-api-reference#RemoveIncompleteUpload) +* [`SelectObjectContent`](https://docs.minio.io/docs/golang-client-api-reference#SelectObjectContent) + ### API Reference : Presigned Operations * [`PresignedGetObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedGetObject) @@ -182,7 +185,11 @@ The full API Reference is available here. * [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go) * [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go) * [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go) - + +### Full Examples : Bucket lifecycle Operations +* [setbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketlifecycle.go) +* [getbucketlifecycle.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketlifecycle.go) + ### Full Examples : Bucket notification Operations * [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go) * [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go) @@ -219,7 +226,7 @@ The full API Reference is available here. 
## Explore Further * [Complete Documentation](https://docs.minio.io) -* [Minio Go Client SDK API Reference](https://docs.minio.io/docs/golang-client-api-reference) +* [Minio Go Client SDK API Reference](https://docs.minio.io/docs/golang-client-api-reference) * [Go Music Player App Full Application Example](https://docs.minio.io/docs/go-music-player-app) ## Contribute diff --git a/api-compose-object.go b/api-compose-object.go index 5d3ede7..3ac36c5 100644 --- a/api-compose-object.go +++ b/api-compose-object.go @@ -101,7 +101,11 @@ func (d *DestinationInfo) getUserMetaHeadersMap(withCopyDirectiveHeader bool) ma r["x-amz-metadata-directive"] = "REPLACE" } for k, v := range d.userMetadata { - r["x-amz-meta-"+k] = v + if isAmzHeader(k) || isStandardHeader(k) || isStorageClassHeader(k) { + r[k] = v + } else { + r["x-amz-meta-"+k] = v + } } return r } @@ -358,10 +362,10 @@ func (c Client) ComposeObjectWithProgress(dst DestinationInfo, srcs []SourceInfo srcSizes := make([]int64, len(srcs)) var totalSize, size, totalParts int64 var srcUserMeta map[string]string - var etag string + etags := make([]string, len(srcs)) var err error for i, src := range srcs { - size, etag, srcUserMeta, err = src.getProps(c) + size, etags[i], srcUserMeta, err = src.getProps(c) if err != nil { return err } @@ -373,15 +377,6 @@ func (c Client) ComposeObjectWithProgress(dst DestinationInfo, srcs []SourceInfo fmt.Sprintf("Client side encryption is used in source object %s/%s", src.bucket, src.object)) } - // Since we did a HEAD to get size, we use the ETag - // value to make sure the object has not changed by - // the time we perform the copy. This is done, only if - // the user has not set their own ETag match - // condition. - if src.Headers.Get("x-amz-copy-source-if-match") == "" { - src.SetMatchETagCond(etag) - } - // Check if a segment is specified, and if so, is the // segment within object bounds? 
if src.start != -1 { @@ -429,7 +424,15 @@ func (c Client) ComposeObjectWithProgress(dst DestinationInfo, srcs []SourceInfo // Now, handle multipart-copy cases. - // 1. Initiate a new multipart upload. + // 1. Ensure that the object has not been changed while + // we are copying data. + for i, src := range srcs { + if src.Headers.Get("x-amz-copy-source-if-match") == "" { + src.SetMatchETagCond(etags[i]) + } + } + + // 2. Initiate a new multipart upload. // Set user-metadata on the destination object. If no // user-metadata is specified, and there is only one source, @@ -449,13 +452,13 @@ func (c Client) ComposeObjectWithProgress(dst DestinationInfo, srcs []SourceInfo return err } - // 2. Perform copy part uploads + // 3. Perform copy part uploads objParts := []CompletePart{} partIndex := 1 for i, src := range srcs { h := src.Headers if src.encryption != nil { - src.encryption.Marshal(h) + encrypt.SSECopy(src.encryption).Marshal(h) } // Add destination encryption headers if dst.encryption != nil { @@ -480,14 +483,14 @@ func (c Client) ComposeObjectWithProgress(dst DestinationInfo, srcs []SourceInfo return err } if progress != nil { - io.CopyN(ioutil.Discard, progress, start+end-1) + io.CopyN(ioutil.Discard, progress, end-start+1) } objParts = append(objParts, complPart) partIndex++ } } - // 3. Make final complete-multipart request. + // 4. Make final complete-multipart request. 
_, err = c.completeMultipartUpload(ctx, dst.bucket, dst.object, uploadID, completeMultipartUpload{Parts: objParts}) if err != nil { diff --git a/api-compose-object_test.go b/api-compose-object_test.go index 14f0e18..295bbc2 100644 --- a/api-compose-object_test.go +++ b/api-compose-object_test.go @@ -18,6 +18,7 @@ package minio import ( "reflect" + "strings" "testing" ) @@ -118,3 +119,37 @@ func TestCalculateEvenSplits(t *testing.T) { } } } + +func TestGetUserMetaHeadersMap(t *testing.T) { + + userMetadata := map[string]string{ + "test": "test", + "x-amz-acl": "public-read-write", + "content-type": "application/binary", + "X-Amz-Storage-Class": "rrs", + "x-amz-grant-write": "test@exo.ch", + } + + destInfo := &DestinationInfo{"bucket", "object", nil, userMetadata} + + r := destInfo.getUserMetaHeadersMap(true) + + i := 0 + + if _, ok := r["x-amz-metadata-directive"]; !ok { + t.Errorf("Test %d - metadata directive was expected but is missing", i) + i++ + } + + for k := range r { + if strings.HasSuffix(k, "test") && !strings.HasPrefix(k, "x-amz-meta-") { + t.Errorf("Test %d - meta %q was expected as an x amz meta", i, k) + i++ + } + + if !strings.HasSuffix(k, "test") && strings.HasPrefix(k, "x-amz-meta-") { + t.Errorf("Test %d - an amz/standard/storageClass Header was expected but got an x amz meta data", i) + i++ + } + } +} diff --git a/api-get-lifecycle.go b/api-get-lifecycle.go new file mode 100644 index 0000000..8097bfc --- /dev/null +++ b/api-get-lifecycle.go @@ -0,0 +1,77 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "context" + "io/ioutil" + "net/http" + "net/url" + + "github.com/minio/minio-go/pkg/s3utils" +) + +// GetBucketLifecycle - get bucket lifecycle. +func (c Client) GetBucketLifecycle(bucketName string) (string, error) { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return "", err + } + bucketLifecycle, err := c.getBucketLifecycle(bucketName) + if err != nil { + errResponse := ToErrorResponse(err) + if errResponse.Code == "NoSuchLifecycleConfiguration" { + return "", nil + } + return "", err + } + return bucketLifecycle, nil +} + +// Request server for current bucket lifecycle. +func (c Client) getBucketLifecycle(bucketName string) (string, error) { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("lifecycle", "") + + // Execute GET on bucket to get lifecycle. 
+ resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + }) + + defer closeResponse(resp) + if err != nil { + return "", err + } + + if resp != nil { + if resp.StatusCode != http.StatusOK { + return "", httpRespToErrorResponse(resp, bucketName, "") + } + } + + bucketLifecycleBuf, err := ioutil.ReadAll(resp.Body) + if err != nil { + return "", err + } + + lifecycle := string(bucketLifecycleBuf) + return lifecycle, err +} diff --git a/api-get-object-acl.go b/api-get-object-acl.go new file mode 100644 index 0000000..af5544d --- /dev/null +++ b/api-get-object-acl.go @@ -0,0 +1,136 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package minio + +import ( + "context" + "net/http" + "net/url" +) + +type accessControlPolicy struct { + Owner struct { + ID string `xml:"ID"` + DisplayName string `xml:"DisplayName"` + } `xml:"Owner"` + AccessControlList struct { + Grant []struct { + Grantee struct { + ID string `xml:"ID"` + DisplayName string `xml:"DisplayName"` + URI string `xml:"URI"` + } `xml:"Grantee"` + Permission string `xml:"Permission"` + } `xml:"Grant"` + } `xml:"AccessControlList"` +} + +//GetObjectACL get object ACLs +func (c Client) GetObjectACL(bucketName, objectName string) (*ObjectInfo, error) { + + resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: url.Values{ + "acl": []string{""}, + }, + }) + if err != nil { + return nil, err + } + defer closeResponse(resp) + + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucketName, objectName) + } + + res := &accessControlPolicy{} + + if err := xmlDecoder(resp.Body, res); err != nil { + return nil, err + } + + objInfo, err := c.statObject(context.Background(), bucketName, objectName, StatObjectOptions{}) + if err != nil { + return nil, err + } + + cannedACL := getCannedACL(res) + if cannedACL != "" { + objInfo.Metadata.Add("X-Amz-Acl", cannedACL) + return &objInfo, nil + } + + grantACL := getAmzGrantACL(res) + for k, v := range grantACL { + objInfo.Metadata[k] = v + } + + return &objInfo, nil +} + +func getCannedACL(aCPolicy *accessControlPolicy) string { + grants := aCPolicy.AccessControlList.Grant + + switch { + case len(grants) == 1: + if grants[0].Grantee.URI == "" && grants[0].Permission == "FULL_CONTROL" { + return "private" + } + case len(grants) == 2: + for _, g := range grants { + if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && g.Permission == "READ" { + return "authenticated-read" + } + if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && 
g.Permission == "READ" { + return "public-read" + } + if g.Permission == "READ" && g.Grantee.ID == aCPolicy.Owner.ID { + return "bucket-owner-read" + } + } + case len(grants) == 3: + for _, g := range grants { + if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "WRITE" { + return "public-read-write" + } + } + } + return "" +} + +func getAmzGrantACL(aCPolicy *accessControlPolicy) map[string][]string { + grants := aCPolicy.AccessControlList.Grant + res := map[string][]string{} + + for _, g := range grants { + switch { + case g.Permission == "READ": + res["X-Amz-Grant-Read"] = append(res["X-Amz-Grant-Read"], "id="+g.Grantee.ID) + case g.Permission == "WRITE": + res["X-Amz-Grant-Write"] = append(res["X-Amz-Grant-Write"], "id="+g.Grantee.ID) + case g.Permission == "READ_ACP": + res["X-Amz-Grant-Read-Acp"] = append(res["X-Amz-Grant-Read-Acp"], "id="+g.Grantee.ID) + case g.Permission == "WRITE_ACP": + res["X-Amz-Grant-Write-Acp"] = append(res["X-Amz-Grant-Write-Acp"], "id="+g.Grantee.ID) + case g.Permission == "FULL_CONTROL": + res["X-Amz-Grant-Full-Control"] = append(res["X-Amz-Grant-Full-Control"], "id="+g.Grantee.ID) + } + } + return res +} diff --git a/api-get-options.go b/api-get-options.go index a5a8752..dbf062d 100644 --- a/api-get-options.go +++ b/api-get-options.go @@ -44,7 +44,7 @@ func (o GetObjectOptions) Header() http.Header { for k, v := range o.headers { headers.Set(k, v) } - if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() != encrypt.S3 { + if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC { o.ServerSideEncryption.Marshal(headers) } return headers diff --git a/api-list.go b/api-list.go index 3cfb47d..04f7573 100644 --- a/api-list.go +++ b/api-list.go @@ -118,7 +118,7 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d var continuationToken string for { // Get list of objects a maximum of 1000 per request. 
- result, err := c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, 1000) + result, err := c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, 1000, "") if err != nil { objectStatCh <- ObjectInfo{ Err: err, @@ -171,11 +171,12 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d // You can use the request parameters as selection criteria to return a subset of the objects in a bucket. // request parameters :- // --------- -// ?continuation-token - Specifies the key to start with when listing objects in a bucket. +// ?continuation-token - Used to continue iterating over a set of objects // ?delimiter - A delimiter is a character you use to group keys. // ?prefix - Limits the response to keys that begin with the specified prefix. // ?max-keys - Sets the maximum number of keys returned in the response body. -func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int) (ListBucketV2Result, error) { +// ?start-after - Specifies the key to start after when listing objects in a bucket. +func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int, startAfter string) (ListBucketV2Result, error) { // Validate bucket name. if err := s3utils.CheckValidBucketName(bucketName); err != nil { return ListBucketV2Result{}, err @@ -216,6 +217,11 @@ func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken s // Set max keys. urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) + // Set start-after + if startAfter != "" { + urlValues.Set("start-after", startAfter) + } + // Execute GET on bucket to list objects. 
resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{ bucketName: bucketName, @@ -627,30 +633,27 @@ func (c Client) listObjectParts(bucketName, objectName, uploadID string) (partsI return partsInfo, nil } -// findUploadID lists all incomplete uploads and finds the uploadID of the matching object name. -func (c Client) findUploadID(bucketName, objectName string) (uploadID string, err error) { +// findUploadIDs lists all incomplete uploads and find the uploadIDs of the matching object name. +func (c Client) findUploadIDs(bucketName, objectName string) ([]string, error) { + var uploadIDs []string // Make list incomplete uploads recursive. isRecursive := true // Turn off size aggregation of individual parts, in this request. isAggregateSize := false - // latestUpload to track the latest multipart info for objectName. - var latestUpload ObjectMultipartInfo // Create done channel to cleanup the routine. doneCh := make(chan struct{}) defer close(doneCh) // List all incomplete uploads. for mpUpload := range c.listIncompleteUploads(bucketName, objectName, isRecursive, isAggregateSize, doneCh) { if mpUpload.Err != nil { - return "", mpUpload.Err + return nil, mpUpload.Err } if objectName == mpUpload.Key { - if mpUpload.Initiated.Sub(latestUpload.Initiated) > 0 { - latestUpload = mpUpload - } + uploadIDs = append(uploadIDs, mpUpload.UploadID) } } // Return the latest upload id. - return latestUpload.UploadID, nil + return uploadIDs, nil } // getTotalMultipartSize - calculate total uploaded size for the a given multipart object. diff --git a/api-put-bucket.go b/api-put-bucket.go index cb9d8f2..33dc0cf 100644 --- a/api-put-bucket.go +++ b/api-put-bucket.go @@ -178,6 +178,87 @@ func (c Client) removeBucketPolicy(bucketName string) error { return nil } +// SetBucketLifecycle set the lifecycle on an existing bucket. +func (c Client) SetBucketLifecycle(bucketName, lifecycle string) error { + // Input validation. 
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // If lifecycle is empty then delete it. + if lifecycle == "" { + return c.removeBucketLifecycle(bucketName) + } + + // Save the updated lifecycle. + return c.putBucketLifecycle(bucketName, lifecycle) +} + +// Saves a new bucket lifecycle. +func (c Client) putBucketLifecycle(bucketName, lifecycle string) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("lifecycle", "") + + // Content-length is mandatory for put lifecycle request + lifecycleReader := strings.NewReader(lifecycle) + b, err := ioutil.ReadAll(lifecycleReader) + if err != nil { + return err + } + + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: lifecycleReader, + contentLength: int64(len(b)), + contentMD5Base64: sumMD5Base64(b), + } + + // Execute PUT to upload a new bucket lifecycle. + resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + return nil +} + +// Remove lifecycle from a bucket. +func (c Client) removeBucketLifecycle(bucketName string) error { + // Input validation. + if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("lifecycle", "") + + // Execute DELETE on objectName. 
+ resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Hex: emptySHA256Hex, + }) + defer closeResponse(resp) + if err != nil { + return err + } + return nil +} + // SetBucketNotification saves a new bucket notification. func (c Client) SetBucketNotification(bucketName string, bucketNotification BucketNotification) error { // Input validation. diff --git a/api-put-object-multipart.go b/api-put-object-multipart.go index 52dc069..db92520 100644 --- a/api-put-object-multipart.go +++ b/api-put-object-multipart.go @@ -259,7 +259,11 @@ func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID // Set encryption headers, if any. customHeader := make(http.Header) - if sse != nil && sse.Type() != encrypt.S3 && sse.Type() != encrypt.KMS { + // https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPart.html + // Server-side encryption is supported by the S3 Multipart Upload actions. + // Unless you are using a customer-provided encryption key, you don't need + // to specify the encryption parameters in each UploadPart request. + if sse != nil && sse.Type() == encrypt.SSEC { sse.Marshal(customHeader) } diff --git a/api-put-object.go b/api-put-object.go index 45ae11d..0330cd9 100644 --- a/api-put-object.go +++ b/api-put-object.go @@ -28,7 +28,7 @@ import ( "github.com/minio/minio-go/pkg/encrypt" "github.com/minio/minio-go/pkg/s3utils" - "golang.org/x/net/lex/httplex" + "golang.org/x/net/http/httpguts" ) // PutObjectOptions represents options specified by user for PutObject call @@ -101,10 +101,10 @@ func (opts PutObjectOptions) Header() (header http.Header) { // validate() checks if the UserMetadata map has standard headers or and raises an error if so. 
func (opts PutObjectOptions) validate() (err error) { for k, v := range opts.UserMetadata { - if !httplex.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) { + if !httpguts.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) { return ErrInvalidArgument(k + " unsupported user defined metadata name") } - if !httplex.ValidHeaderFieldValue(v) { + if !httpguts.ValidHeaderFieldValue(v) { return ErrInvalidArgument(v + " unsupported user defined metadata value") } } diff --git a/api-remove.go b/api-remove.go index c2ffcdd..f33df4d 100644 --- a/api-remove.go +++ b/api-remove.go @@ -233,18 +233,20 @@ func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error { if err := s3utils.CheckValidObjectName(objectName); err != nil { return err } - // Find multipart upload id of the object to be aborted. - uploadID, err := c.findUploadID(bucketName, objectName) + // Find multipart upload ids of the object to be aborted. + uploadIDs, err := c.findUploadIDs(bucketName, objectName) if err != nil { return err } - if uploadID != "" { - // Upload id found, abort the incomplete multipart upload. + + for _, uploadID := range uploadIDs { + // abort incomplete multipart upload, based on the upload id passed. err := c.abortMultipartUpload(context.Background(), bucketName, objectName, uploadID) if err != nil { return err } } + return nil } diff --git a/api-select.go b/api-select.go new file mode 100644 index 0000000..a9b6f17 --- /dev/null +++ b/api-select.go @@ -0,0 +1,520 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bytes" + "context" + "encoding/binary" + "encoding/xml" + "errors" + "fmt" + "hash" + "hash/crc32" + "io" + "net/http" + "net/url" + "strings" + + "github.com/minio/minio-go/pkg/encrypt" + "github.com/minio/minio-go/pkg/s3utils" +) + +// CSVFileHeaderInfo - is the parameter for whether to utilize headers. +type CSVFileHeaderInfo string + +// Constants for file header info. +const ( + CSVFileHeaderInfoNone CSVFileHeaderInfo = "NONE" + CSVFileHeaderInfoIgnore = "IGNORE" + CSVFileHeaderInfoUse = "USE" +) + +// SelectCompressionType - is the parameter for what type of compression is +// present +type SelectCompressionType string + +// Constants for compression types under select API. +const ( + SelectCompressionNONE SelectCompressionType = "NONE" + SelectCompressionGZIP = "GZIP" + SelectCompressionBZIP = "BZIP2" +) + +// CSVQuoteFields - is the parameter for how CSV fields are quoted. +type CSVQuoteFields string + +// Constants for csv quote styles. +const ( + CSVQuoteFieldsAlways CSVQuoteFields = "Always" + CSVQuoteFieldsAsNeeded = "AsNeeded" +) + +// QueryExpressionType - is of what syntax the expression is, this should only +// be SQL +type QueryExpressionType string + +// Constants for expression type. +const ( + QueryExpressionTypeSQL QueryExpressionType = "SQL" +) + +// JSONType determines json input serialization type. +type JSONType string + +// Constants for JSONTypes. 
+const ( + JSONDocumentType JSONType = "DOCUMENT" + JSONLinesType = "LINES" +) + +// ParquetInputOptions parquet input specific options +type ParquetInputOptions struct{} + +// CSVInputOptions csv input specific options +type CSVInputOptions struct { + FileHeaderInfo CSVFileHeaderInfo + RecordDelimiter string + FieldDelimiter string + QuoteCharacter string + QuoteEscapeCharacter string + Comments string +} + +// CSVOutputOptions csv output specific options +type CSVOutputOptions struct { + QuoteFields CSVQuoteFields + RecordDelimiter string + FieldDelimiter string + QuoteCharacter string + QuoteEscapeCharacter string +} + +// JSONInputOptions json input specific options +type JSONInputOptions struct { + Type JSONType +} + +// JSONOutputOptions - json output specific options +type JSONOutputOptions struct { + RecordDelimiter string +} + +// SelectObjectInputSerialization - input serialization parameters +type SelectObjectInputSerialization struct { + CompressionType SelectCompressionType + Parquet *ParquetInputOptions `xml:"Parquet,omitempty"` + CSV *CSVInputOptions `xml:"CSV,omitempty"` + JSON *JSONInputOptions `xml:"JSON,omitempty"` +} + +// SelectObjectOutputSerialization - output serialization parameters. +type SelectObjectOutputSerialization struct { + CSV *CSVOutputOptions `xml:"CSV,omitempty"` + JSON *JSONOutputOptions `xml:"JSON,omitempty"` +} + +// SelectObjectOptions - represents the input select body +type SelectObjectOptions struct { + XMLName xml.Name `xml:"SelectObjectContentRequest" json:"-"` + ServerSideEncryption encrypt.ServerSide `xml:"-"` + Expression string + ExpressionType QueryExpressionType + InputSerialization SelectObjectInputSerialization + OutputSerialization SelectObjectOutputSerialization + RequestProgress struct { + Enabled bool + } +} + +// Header returns the http.Header representation of the SelectObject options. 
+func (o SelectObjectOptions) Header() http.Header { + headers := make(http.Header) + if o.ServerSideEncryption != nil && o.ServerSideEncryption.Type() == encrypt.SSEC { + o.ServerSideEncryption.Marshal(headers) + } + return headers +} + +// SelectObjectType - is the parameter which defines what type of object the +// operation is being performed on. +type SelectObjectType string + +// Constants for input data types. +const ( + SelectObjectTypeCSV SelectObjectType = "CSV" + SelectObjectTypeJSON = "JSON" + SelectObjectTypeParquet = "Parquet" +) + +// preludeInfo is used for keeping track of necessary information from the +// prelude. +type preludeInfo struct { + totalLen uint32 + headerLen uint32 +} + +// SelectResults is used for the streaming responses from the server. +type SelectResults struct { + pipeReader *io.PipeReader + resp *http.Response + stats *StatsMessage + progress *ProgressMessage +} + +// ProgressMessage is a struct for progress xml message. +type ProgressMessage struct { + XMLName xml.Name `xml:"Progress" json:"-"` + StatsMessage +} + +// StatsMessage is a struct for stat xml message. +type StatsMessage struct { + XMLName xml.Name `xml:"Stats" json:"-"` + BytesScanned int64 + BytesProcessed int64 + BytesReturned int64 +} + +// eventType represents the type of event. +type eventType string + +// list of event-types returned by Select API. +const ( + endEvent eventType = "End" + errorEvent = "Error" + recordsEvent = "Records" + progressEvent = "Progress" + statsEvent = "Stats" +) + +// contentType represents content type of event. +type contentType string + +const ( + xmlContent contentType = "text/xml" +) + +// SelectObjectContent is a implementation of http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html AWS S3 API. +func (c Client) SelectObjectContent(ctx context.Context, bucketName, objectName string, opts SelectObjectOptions) (*SelectResults, error) { + // Input validation. 
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil { + return nil, err + } + if err := s3utils.CheckValidObjectName(objectName); err != nil { + return nil, err + } + + selectReqBytes, err := xml.Marshal(opts) + if err != nil { + return nil, err + } + + urlValues := make(url.Values) + urlValues.Set("select", "") + urlValues.Set("select-type", "2") + + // Execute POST on bucket/object. + resp, err := c.executeMethod(ctx, "POST", requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + customHeader: opts.Header(), + contentMD5Base64: sumMD5Base64(selectReqBytes), + contentSHA256Hex: sum256Hex(selectReqBytes), + contentBody: bytes.NewReader(selectReqBytes), + contentLength: int64(len(selectReqBytes)), + }) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp, bucketName, "") + } + + pipeReader, pipeWriter := io.Pipe() + streamer := &SelectResults{ + resp: resp, + stats: &StatsMessage{}, + progress: &ProgressMessage{}, + pipeReader: pipeReader, + } + streamer.start(pipeWriter) + return streamer, nil +} + +// Close - closes the underlying response body and the stream reader. +func (s *SelectResults) Close() error { + defer closeResponse(s.resp) + return s.pipeReader.Close() +} + +// Read - is a reader compatible implementation for SelectObjectContent records. +func (s *SelectResults) Read(b []byte) (n int, err error) { + return s.pipeReader.Read(b) +} + +// Stats - information about a request's stats when processing is complete. +func (s *SelectResults) Stats() *StatsMessage { + return s.stats +} + +// Progress - information about the progress of a request. +func (s *SelectResults) Progress() *ProgressMessage { + return s.progress +} + +// start is the main function that decodes the large byte array into +// several events that are sent through the eventstream. 
func (s *SelectResults) start(pipeWriter *io.PipeWriter) {
	go func() {
		// Loop over event-stream messages until an End event, an Error
		// event, or a framing/CRC failure terminates the stream.
		for {
			var prelude preludeInfo
			var headers = make(http.Header)
			var err error

			// Create CRC code; everything read through crcReader is folded
			// into it so the message checksum can be verified at the end.
			crc := crc32.New(crc32.IEEETable)
			crcReader := io.TeeReader(s.resp.Body, crc)

			// Extract the prelude(12 bytes) into a struct to extract relevant information.
			prelude, err = processPrelude(crcReader, crc)
			if err != nil {
				pipeWriter.CloseWithError(err)
				closeResponse(s.resp)
				return
			}

			// Extract the headers(variable bytes) into a struct to extract relevant information
			if prelude.headerLen > 0 {
				if err = extractHeader(io.LimitReader(crcReader, int64(prelude.headerLen)), headers); err != nil {
					pipeWriter.CloseWithError(err)
					closeResponse(s.resp)
					return
				}
			}

			// Get the actual payload length so that the appropriate amount of
			// bytes can be read or parsed.
			payloadLen := prelude.PayloadLen()

			// Get content-type of the payload.
			c := contentType(headers.Get("content-type"))

			// Get event type of the payload.
			e := eventType(headers.Get("event-type"))

			// Handle all supported events.
			switch e {
			case endEvent:
				// Clean end of stream: closing the pipe makes Read return io.EOF.
				pipeWriter.Close()
				closeResponse(s.resp)
				return
			case errorEvent:
				// Server-side error: surface its type and message to the reader.
				pipeWriter.CloseWithError(errors.New("Error Type of " + headers.Get("error-type") + " " + headers.Get("error-message")))
				closeResponse(s.resp)
				return
			case recordsEvent:
				// Record data: stream the payload verbatim to the reader.
				if _, err = io.Copy(pipeWriter, io.LimitReader(crcReader, payloadLen)); err != nil {
					pipeWriter.CloseWithError(err)
					closeResponse(s.resp)
					return
				}
			case progressEvent:
				// Progress payloads are XML; decode into s.progress.
				switch c {
				case xmlContent:
					if err = xmlDecoder(io.LimitReader(crcReader, payloadLen), s.progress); err != nil {
						pipeWriter.CloseWithError(err)
						closeResponse(s.resp)
						return
					}
				default:
					pipeWriter.CloseWithError(fmt.Errorf("Unexpected content-type %s sent for event-type %s", c, progressEvent))
					closeResponse(s.resp)
					return
				}
			case statsEvent:
				// Stats payloads are XML; decode into s.stats.
				switch c {
				case xmlContent:
					if err = xmlDecoder(io.LimitReader(crcReader, payloadLen), s.stats); err != nil {
						pipeWriter.CloseWithError(err)
						closeResponse(s.resp)
						return
					}
				default:
					pipeWriter.CloseWithError(fmt.Errorf("Unexpected content-type %s sent for event-type %s", c, statsEvent))
					closeResponse(s.resp)
					return
				}
			}

			// Ensures that the full message's CRC is correct and
			// that the message is not corrupted. Note: the trailing CRC is
			// read from the raw body (not crcReader) so it is excluded from
			// the checksum it is being compared against.
			if err := checkCRC(s.resp.Body, crc.Sum32()); err != nil {
				pipeWriter.CloseWithError(err)
				closeResponse(s.resp)
				return
			}

		}
	}()
}

// PayloadLen is a function that calculates the length of the payload.
+func (p preludeInfo) PayloadLen() int64 { + return int64(p.totalLen - p.headerLen - 16) +} + +// processPrelude is the function that reads the 12 bytes of the prelude and +// ensures the CRC is correct while also extracting relevant information into +// the struct, +func processPrelude(prelude io.Reader, crc hash.Hash32) (preludeInfo, error) { + var err error + var pInfo = preludeInfo{} + + // reads total length of the message (first 4 bytes) + pInfo.totalLen, err = extractUint32(prelude) + if err != nil { + return pInfo, err + } + + // reads total header length of the message (2nd 4 bytes) + pInfo.headerLen, err = extractUint32(prelude) + if err != nil { + return pInfo, err + } + + // checks that the CRC is correct (3rd 4 bytes) + preCRC := crc.Sum32() + if err := checkCRC(prelude, preCRC); err != nil { + return pInfo, err + } + + return pInfo, nil +} + +// extracts the relevant information from the Headers. +func extractHeader(body io.Reader, myHeaders http.Header) error { + for { + // extracts the first part of the header, + headerTypeName, err := extractHeaderType(body) + if err != nil { + // Since end of file, we have read all of our headers + if err == io.EOF { + break + } + return err + } + + // reads the 7 present in the header and ignores it. + extractUint8(body) + + headerValueName, err := extractHeaderValue(body) + if err != nil { + return err + } + + myHeaders.Set(headerTypeName, headerValueName) + + } + return nil +} + +// extractHeaderType extracts the first half of the header message, the header type. 
// extractHeaderType extracts the first half of the header message, the
// header type (its name, with the leading ":" stripped).
func extractHeaderType(body io.Reader) (string, error) {
	// extracts the 1-byte length of the header name
	headerNameLen, err := extractUint8(body)
	if err != nil {
		return "", err
	}
	// extracts the string with the appropriate number of bytes
	headerName, err := extractString(body, int(headerNameLen))
	if err != nil {
		return "", err
	}
	return strings.TrimPrefix(headerName, ":"), nil
}

// extractHeaderValue extracts the second half of the header message, the
// header value (a 2-byte big-endian length followed by that many bytes).
func extractHeaderValue(body io.Reader) (string, error) {
	bodyLen, err := extractUint16(body)
	if err != nil {
		return "", err
	}
	bodyName, err := extractString(body, int(bodyLen))
	if err != nil {
		return "", err
	}
	return bodyName, nil
}

// extractString extracts a string of exactly lenBytes bytes from the reader.
// io.ReadFull is used (instead of a single Read call) so that a short read
// is reported as an error rather than silently producing a truncated,
// zero-padded string.
func extractString(source io.Reader, lenBytes int) (string, error) {
	myVal := make([]byte, lenBytes)
	if _, err := io.ReadFull(source, myVal); err != nil {
		return "", err
	}
	return string(myVal), nil
}

// extractUint32 extracts a 4 byte big-endian integer from the reader.
func extractUint32(r io.Reader) (uint32, error) {
	buf := make([]byte, 4)
	_, err := io.ReadFull(r, buf)
	if err != nil {
		return 0, err
	}
	return binary.BigEndian.Uint32(buf), nil
}

// extractUint16 extracts a 2 byte big-endian integer from the reader.
func extractUint16(r io.Reader) (uint16, error) {
	buf := make([]byte, 2)
	_, err := io.ReadFull(r, buf)
	if err != nil {
		return 0, err
	}
	return binary.BigEndian.Uint16(buf), nil
}

// extractUint8 extracts a single byte from the reader.
func extractUint8(r io.Reader) (uint8, error) {
	buf := make([]byte, 1)
	_, err := io.ReadFull(r, buf)
	if err != nil {
		return 0, err
	}
	return buf[0], nil
}

// checkCRC ensures that the CRC matches with the one from the reader.
+func checkCRC(r io.Reader, expect uint32) error { + msgCRC, err := extractUint32(r) + if err != nil { + return err + } + + if msgCRC != expect { + return fmt.Errorf("Checksum Mismatch, MessageCRC of 0x%X does not equal expected CRC of 0x%X", msgCRC, expect) + + } + return nil +} diff --git a/api-stat.go b/api-stat.go index 3b054c3..91e9d39 100644 --- a/api-stat.go +++ b/api-stat.go @@ -47,6 +47,10 @@ func (c Client) BucketExists(bucketName string) (bool, error) { return false, err } if resp != nil { + resperr := httpRespToErrorResponse(resp, bucketName, "") + if ToErrorResponse(resperr).Code == "NoSuchBucket" { + return false, nil + } if resp.StatusCode != http.StatusOK { return false, httpRespToErrorResponse(resp, bucketName, "") } @@ -99,7 +99,7 @@ type Options struct { // Global constants. const ( libraryName = "minio-go" - libraryVersion = "6.0.1" + libraryVersion = "v6.0.11" ) // User Agent should always following the below style. @@ -455,22 +455,9 @@ func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error { return err } } else { - // WORKAROUND for https://github.com/golang/go/issues/13942. - // httputil.DumpResponse does not print response headers for - // all successful calls which have response ContentLength set - // to zero. Keep this workaround until the above bug is fixed. - if resp.ContentLength == 0 { - var buffer bytes.Buffer - if err = resp.Header.Write(&buffer); err != nil { - return err - } - respTrace = buffer.Bytes() - respTrace = append(respTrace, []byte("\r\n")...) - } else { - respTrace, err = httputil.DumpResponse(resp, false) - if err != nil { - return err - } + respTrace, err = httputil.DumpResponse(resp, false) + if err != nil { + return err } } @@ -599,8 +586,8 @@ func (c Client) executeMethod(ctx context.Context, method string, metadata reque // Initiate the request. res, err = c.do(req) if err != nil { - // For supported network errors verify. 
- if isNetErrorRetryable(err) { + // For supported http requests errors verify. + if isHTTPReqErrorRetryable(err) { continue // Retry. } // For other errors, return here no need to retry. @@ -833,7 +820,7 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, isV host = c.s3AccelerateEndpoint } else { // Do not change the host if the endpoint URL is a FIPS S3 endpoint. - if !s3utils.IsAmazonFIPSGovCloudEndpoint(*c.endpointURL) { + if !s3utils.IsAmazonFIPSEndpoint(*c.endpointURL) { // Fetch new host based on the bucket location. host = getS3Endpoint(bucketLocation) } diff --git a/appveyor.yml b/appveyor.yml index aa9f840..48ea6e7 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -16,7 +16,7 @@ install: - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% - go version - go env - - go get -u github.com/golang/lint/golint + - go get -u golang.org/x/lint/golint - go get -u github.com/remyoudompheng/go-misc/deadcode - go get -u github.com/gordonklaus/ineffassign - go get -u golang.org/x/crypto/argon2 @@ -21,6 +21,8 @@ import ( "context" "io" "strings" + + "github.com/minio/minio-go/pkg/encrypt" ) // Core - Inherits Client and adds new methods to expose the low level S3 APIs. @@ -48,9 +50,9 @@ func (c Core) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) } // ListObjectsV2 - Lists all the objects at a prefix, similar to ListObjects() but uses -// continuationToken instead of marker to further filter the results. -func (c Core) ListObjectsV2(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int) (ListBucketV2Result, error) { - return c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, maxkeys) +// continuationToken instead of marker to support iteration over the results. 
+func (c Core) ListObjectsV2(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int, startAfter string) (ListBucketV2Result, error) { + return c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, maxkeys, startAfter) } // CopyObject - copies an object from source object to destination object on server side. @@ -68,7 +70,7 @@ func (c Core) CopyObjectPart(srcBucket, srcObject, destBucket, destObject string } // PutObject - Upload object. Uploads using single PUT call. -func (c Core) PutObject(bucket, object string, data io.Reader, size int64, md5Base64, sha256Hex string, metadata map[string]string) (ObjectInfo, error) { +func (c Core) PutObject(bucket, object string, data io.Reader, size int64, md5Base64, sha256Hex string, metadata map[string]string, sse encrypt.ServerSide) (ObjectInfo, error) { opts := PutObjectOptions{} m := make(map[string]string) for k, v := range metadata { @@ -89,6 +91,7 @@ func (c Core) PutObject(bucket, object string, data io.Reader, size int64, md5Ba } } opts.UserMetadata = m + opts.ServerSideEncryption = sse return c.putObjectDo(context.Background(), bucket, object, data, md5Base64, sha256Hex, size, opts) } @@ -104,8 +107,8 @@ func (c Core) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, de } // PutObjectPart - Upload an object part. 
-func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string) (ObjectPart, error) { - return c.uploadPart(context.Background(), bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, nil) +func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string, sse encrypt.ServerSide) (ObjectPart, error) { + return c.uploadPart(context.Background(), bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, sse) } // ListObjectParts - List uploaded parts of an incomplete upload.x @@ -114,11 +117,11 @@ func (c Core) ListObjectParts(bucket, object, uploadID string, partNumberMarker } // CompleteMultipartUpload - Concatenate uploaded parts and commit to an object. -func (c Core) CompleteMultipartUpload(bucket, object, uploadID string, parts []CompletePart) error { - _, err := c.completeMultipartUpload(context.Background(), bucket, object, uploadID, completeMultipartUpload{ +func (c Core) CompleteMultipartUpload(bucket, object, uploadID string, parts []CompletePart) (string, error) { + res, err := c.completeMultipartUpload(context.Background(), bucket, object, uploadID, completeMultipartUpload{ Parts: parts, }) - return err + return res.ETag, err } // AbortMultipartUpload - Abort an incomplete upload. 
diff --git a/core_test.go b/core_test.go index e76a2a0..5afdd18 100644 --- a/core_test.go +++ b/core_test.go @@ -410,7 +410,7 @@ func TestCoreCopyObject(t *testing.T) { objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ "Content-Type": "binary/octet-stream", - }) + }, nil) if err != nil { t.Fatal("Error:", err, bucketName, objectName) } @@ -525,7 +525,7 @@ func TestCoreCopyObjectPart(t *testing.T) { objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ "Content-Type": "binary/octet-stream", - }) + }, nil) if err != nil { t.Fatal("Error:", err, bucketName, objectName) } @@ -565,7 +565,7 @@ func TestCoreCopyObjectPart(t *testing.T) { } // Complete the multipart upload - err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []CompletePart{fstPart, sndPart, lstPart}) + _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []CompletePart{fstPart, sndPart, lstPart}) if err != nil { t.Fatal("Error:", err, destBucketName, destObjectName) } @@ -672,12 +672,12 @@ func TestCorePutObject(t *testing.T) { metadata := make(map[string]string) metadata["Content-Type"] = objectContentType - objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "1B2M2Y8AsgTpgAmY7PhCfg==", "", metadata) + objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "1B2M2Y8AsgTpgAmY7PhCfg==", "", metadata, nil) if err == nil { t.Fatal("Error expected: error, got: nil(success)") } - objInfo, err = c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", metadata) + objInfo, err = c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", metadata, nil) if err != nil { 
t.Fatal("Error:", err, bucketName, objectName) } @@ -753,7 +753,7 @@ func TestCoreGetObjectMetadata(t *testing.T) { } _, err = core.PutObject(bucketName, "my-objectname", - bytes.NewReader([]byte("hello")), 5, "", "", metadata) + bytes.NewReader([]byte("hello")), 5, "", "", metadata, nil) if err != nil { log.Fatalln(err) } diff --git a/docs/API.md b/docs/API.md index 91a40d7..5778216 100644 --- a/docs/API.md +++ b/docs/API.md @@ -58,8 +58,8 @@ func main() { | [`RemoveBucket`](#RemoveBucket) | [`StatObject`](#StatObject) | [`StatObject`](#StatObject) | | [`GetBucketNotification`](#GetBucketNotification) | [`TraceOff`](#TraceOff) | | [`ListObjects`](#ListObjects) | [`RemoveObject`](#RemoveObject) | | | [`RemoveAllBucketNotification`](#RemoveAllBucketNotification) | [`SetS3TransferAccelerate`](#SetS3TransferAccelerate) | | [`ListObjectsV2`](#ListObjectsV2) | [`RemoveObjects`](#RemoveObjects) | | | [`ListenBucketNotification`](#ListenBucketNotification) | | -| [`ListIncompleteUploads`](#ListIncompleteUploads) | [`RemoveIncompleteUpload`](#RemoveIncompleteUpload) | | | | | -| | [`FPutObject`](#FPutObject) | [`FPutObject`](#FPutObject) | | | | +| [`ListIncompleteUploads`](#ListIncompleteUploads) | [`RemoveIncompleteUpload`](#RemoveIncompleteUpload) | | | [`SetBucketLifecycle`](#SetBucketLifecycle) | | +| | [`FPutObject`](#FPutObject) | [`FPutObject`](#FPutObject) | | [`GetBucketLifecycle`](#GetBucketLifecycle) | | | | [`FGetObject`](#FGetObject) | [`FGetObject`](#FGetObject) | | | | | | [`ComposeObject`](#ComposeObject) | [`ComposeObject`](#ComposeObject) | | | | | | [`NewSourceInfo`](#NewSourceInfo) | [`NewSourceInfo`](#NewSourceInfo) | | | | @@ -68,7 +68,8 @@ func main() { | | [`GetObjectWithContext`](#GetObjectWithContext) | [`GetObjectWithContext`](#GetObjectWithContext) | | | | | [`FPutObjectWithContext`](#FPutObjectWithContext) | [`FPutObjectWithContext`](#FPutObjectWithContext) | | | | | [`FGetObjectWithContext`](#FGetObjectWithContext) | 
[`FGetObjectWithContext`](#FGetObjectWithContext) | | | -| | [`RemoveObjectsWithContext`](#RemoveObjectsWithContext) | | | | +| | [`RemoveObjectsWithContext`](#RemoveObjectsWithContext) | | | | +| | [`SelectObjectContent`](#SelectObjectContent) | | ## 1. Constructor <a name="Minio"></a> @@ -1038,7 +1039,7 @@ Parameters |:---|:---| :---| |`ctx` | _context.Context_ |Request context | |`bucketName` | _string_ |Name of the bucket | -|`objectsCh` | _chan string_ | Channel of objects to be removed | +|`objectsCh` | _chan string_ | Channel of objects to be removed | __Return Values__ @@ -1068,6 +1069,59 @@ for rErr := range minioClient.RemoveObjects(ctx, "my-bucketname", objectsCh) { fmt.Println("Error detected during deletion: ", rErr) } ``` +<a name="SelectObjectContent"></a> +### SelectObjectContent(ctx context.Context, bucketName string, objectsName string, expression string, options SelectObjectOptions) *SelectResults +Parameters + +|Param |Type |Description | +|:---|:---| :---| +|`ctx` | _context.Context_ |Request context | +|`bucketName` | _string_ |Name of the bucket | +|`objectName` | _string_ |Name of the object | +|`options` | _SelectObjectOptions_ | Query Options | + +__Return Values__ + +|Param |Type |Description | +|:---|:---| :---| +|`SelectResults` | _SelectResults_ | Is an io.ReadCloser object which can be directly passed to csv.NewReader for processing output. | + +```go + // Initialize minio client object. 
+ minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL) + if err != nil { + log.Fatalln(err) + } + + opts := minio.SelectObjectOptions{ + Expression: "select count(*) from s3object", + ExpressionType: minio.QueryExpressionTypeSQL, + InputSerialization: minio.SelectObjectInputSerialization{ + CompressionType: minio.SelectCompressionNONE, + CSV: &minio.CSVInputOptions{ + FileHeaderInfo: minio.CSVFileHeaderInfoNone, + RecordDelimiter: "\n", + FieldDelimiter: ",", + }, + }, + OutputSerialization: minio.SelectObjectOutputSerialization{ + CSV: &minio.CSVOutputOptions{ + RecordDelimiter: "\n", + FieldDelimiter: ",", + }, + }, + } + + reader, err := s3Client.SelectObjectContent(context.Background(), "mycsvbucket", "mycsv.csv", opts) + if err != nil { + log.Fatalln(err) + } + defer reader.Close() + + if _, err := io.Copy(os.Stdout, reader); err != nil { + log.Fatalln(err) + } +``` <a name="RemoveIncompleteUpload"></a> ### RemoveIncompleteUpload(bucketName, objectName string) error @@ -1445,6 +1499,72 @@ for notificationInfo := range minioClient.ListenBucketNotification("mybucket", " } ``` +<a name="SetBucketLifecycle"></a> +### SetBucketLifecycle(bucketname, lifecycle string) error +Set lifecycle on bucket or an object prefix. 
+ +__Parameters__ + +|Param |Type |Description | +|:---|:---| :---| +|`bucketName` | _string_ |Name of the bucket| +|`lifecycle` | _string_ |Lifecycle to be set | + +__Return Values__ + +|Param |Type |Description | +|:---|:---| :---| +|`err` | _error_ |Standard Error | + +__Example__ + +```go +lifecycle := `<LifecycleConfiguration> + <Rule> + <ID>expire-bucket</ID> + <Prefix></Prefix> + <Status>Enabled</Status> + <Expiration> + <Days>365</Days> + </Expiration> + </Rule> +</LifecycleConfiguration>` + +err = minioClient.SetBucketLifecycle("my-bucketname", lifecycle) +if err != nil { + fmt.Println(err) + return +} +``` + +<a name="GetBucketLifecycle"></a> +### GetBucketLifecycle(bucketName) (lifecycle string, error) +Get lifecycle on a bucket or a prefix. + +__Parameters__ + + +|Param |Type |Description | +|:---|:---| :---| +|`bucketName` | _string_ |Name of the bucket | + +__Return Values__ + + +|Param |Type |Description | +|:---|:---| :---| +|`lifecycle` | _string_ |Lifecycle returned from the server | +|`err` | _error_ |Standard Error | + +__Example__ + +```go +lifecycle, err := minioClient.GetBucketLifecycle("my-bucketname") +if err != nil { + log.Fatalln(err) +} +``` + ## 7. Client custom settings <a name="SetAppInfo"></a> diff --git a/examples/s3/getbucketlifecycle.go b/examples/s3/getbucketlifecycle.go new file mode 100644 index 0000000..2e3ef41 --- /dev/null +++ b/examples/s3/getbucketlifecycle.go @@ -0,0 +1,65 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "io" + "log" + "os" + "strings" + + "github.com/minio/minio-go" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are + // dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value. + s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + if err != nil { + log.Fatalln(err) + } + + // s3Client.TraceOn(os.Stderr) + + // Get bucket lifecycle from S3 + lifecycle, err := s3Client.GetBucketLifecycle("my-bucketname") + if err != nil { + log.Fatalln(err) + } + + // Create lifecycle file + localLifecycleFile, err := os.Create("lifecycle.json") + if err != nil { + log.Fatalln(err) + } + defer localLifecycleFile.Close() + + lifecycleReader := strings.NewReader(lifecycle) + + if _, err := io.Copy(localLifecycleFile, lifecycleReader); err != nil { + log.Fatalln(err) + } +} diff --git a/examples/s3/getobjectacl.go b/examples/s3/getobjectacl.go new file mode 100644 index 0000000..f2bbd95 --- /dev/null +++ b/examples/s3/getobjectacl.go @@ -0,0 +1,53 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2018-2019 Minio, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "fmt" + "log" + + minio "github.com/minio/minio-go" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname and + // my-testfile are dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value. + s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", true) + if err != nil { + log.Fatalln(err) + } + + objectInfo, err := s3Client.GetObjectACL("my-bucketname", "my-objectname") + if err != nil { + log.Fatalln(err) + } + + //print all value header (acl, metadata, standard header value...) + for k, v := range objectInfo.Metadata { + fmt.Println("key:", k) + fmt.Printf(" - value: %v\n", v) + } +} diff --git a/examples/s3/selectobject.go b/examples/s3/selectobject.go new file mode 100644 index 0000000..e23ccf8 --- /dev/null +++ b/examples/s3/selectobject.go @@ -0,0 +1,73 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2018 Minio, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "context" + "io" + "log" + "os" + + minio "github.com/minio/minio-go" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname and + // my-testfile are dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value. 
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", true) + if err != nil { + log.Fatalln(err) + } + + opts := minio.SelectObjectOptions{ + Expression: "select count(*) from s3object", + ExpressionType: minio.QueryExpressionTypeSQL, + InputSerialization: minio.SelectObjectInputSerialization{ + CompressionType: minio.SelectCompressionNONE, + CSV: &minio.CSVInputOptions{ + FileHeaderInfo: minio.CSVFileHeaderInfoNone, + RecordDelimiter: "\n", + FieldDelimiter: ",", + }, + }, + OutputSerialization: minio.SelectObjectOutputSerialization{ + CSV: &minio.CSVOutputOptions{ + RecordDelimiter: "\n", + FieldDelimiter: ",", + }, + }, + } + + reader, err := s3Client.SelectObjectContent(context.Background(), "mycsvbucket", "mycsv.csv", opts) + if err != nil { + log.Fatalln(err) + } + defer reader.Close() + + if _, err := io.Copy(os.Stdout, reader); err != nil { + log.Fatalln(err) + } +} diff --git a/examples/s3/setbucketlifecycle.go b/examples/s3/setbucketlifecycle.go new file mode 100644 index 0000000..7eaa946 --- /dev/null +++ b/examples/s3/setbucketlifecycle.go @@ -0,0 +1,50 @@ +// +build ignore + +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage + * Copyright 2015-2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package main + +import ( + "log" + + "github.com/minio/minio-go" +) + +func main() { + // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are + // dummy values, please replace them with original values. + + // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access. + // This boolean value is the last argument for New(). + + // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically + // determined based on the Endpoint value. + s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) + if err != nil { + log.Fatalln(err) + } + + // s3Client.TraceOn(os.Stderr) + + // Set lifecycle on a bucket + lifecycle := `<LifecycleConfiguration><Rule><ID>expire-bucket</ID><Prefix></Prefix><Status>Enabled</Status><Expiration><Days>365</Days></Expiration></Rule></LifecycleConfiguration>` + err = s3Client.SetBucketLifecycle("my-bucketname", lifecycle) + if err != nil { + log.Fatalln(err) + } +} diff --git a/functional_tests.go b/functional_tests.go index 421b30e..62be0e9 100644 --- a/functional_tests.go +++ b/functional_tests.go @@ -569,7 +569,7 @@ func testPutObjectReadAt() { logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err) return } - if st.ContentType != objectContentType { + if st.ContentType != objectContentType && st.ContentType != "application/octet-stream" { logError(testName, function, args, startTime, "", "Content types don't match", err) return } @@ -683,7 +683,7 @@ func testPutObjectWithMetadata() { logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err) return } - if st.ContentType != customContentType { + if st.ContentType != customContentType && st.ContentType != "application/octet-stream" { logError(testName, 
function, args, startTime, "", "ContentType does not match, expected "+customContentType+" got "+st.ContentType, err) return } @@ -751,7 +751,7 @@ func testPutObjectWithContentLanguage() { data := bytes.Repeat([]byte("a"), int(0)) n, err := c.PutObject(bucketName, objectName, bytes.NewReader(data), int64(0), minio.PutObjectOptions{ - ContentLanguage: "en-US", + ContentLanguage: "en", }) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) @@ -769,8 +769,8 @@ func testPutObjectWithContentLanguage() { return } - if objInfo.Metadata.Get("Content-Language") != "en-US" { - logError(testName, function, args, startTime, "", "Expected content-language 'en-US' doesn't match with StatObject return value", err) + if objInfo.Metadata.Get("Content-Language") != "en" { + logError(testName, function, args, startTime, "", "Expected content-language 'en' doesn't match with StatObject return value", err) return } @@ -1359,7 +1359,7 @@ func testFPutObjectMultipart() { logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(totalSize))+" got "+string(objInfo.Size), err) return } - if objInfo.ContentType != objectContentType { + if objInfo.ContentType != objectContentType && objInfo.ContentType != "application/octet-stream" { logError(testName, function, args, startTime, "", "ContentType doesn't match", err) return } @@ -1499,6 +1499,7 @@ func testFPutObject() { // Perform FPutObject with no contentType provided (Expecting application/x-gtar) args["objectName"] = objectName + "-GTar" + args["opts"] = minio.PutObjectOptions{} n, err = c.FPutObject(bucketName, objectName+"-GTar", fName+".gtar", minio.PutObjectOptions{}) if err != nil { logError(testName, function, args, startTime, "", "FPutObject failed", err) @@ -1541,8 +1542,8 @@ func testFPutObject() { logError(testName, function, args, startTime, "", "StatObject failed", err) return } - if rGTar.ContentType != "application/x-gtar" { - 
logError(testName, function, args, startTime, "", "ContentType does not match, expected application/x-gtar, got "+rGTar.ContentType, err) + if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" { + logError(testName, function, args, startTime, "", "ContentType does not match, expected application/x-gtar or application/octet-stream, got "+rGTar.ContentType, err) return } @@ -2270,7 +2271,8 @@ func testPresignedPostPolicy() { defer reader.Close() objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") - metadataKey := randString(60, rand.NewSource(time.Now().UnixNano()), "") + // Azure requires the key to not start with a number + metadataKey := randString(60, rand.NewSource(time.Now().UnixNano()), "user") metadataValue := randString(60, rand.NewSource(time.Now().UnixNano()), "") buf, err := ioutil.ReadAll(reader) @@ -2623,8 +2625,14 @@ func testCopyObject() { return } + oi, err := c.StatObject(bucketName, objectName, minio.StatObjectOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject failed", err) + return + } + stOpts := minio.StatObjectOptions{} - stOpts.SetMatchETag(objInfo.ETag) + stOpts.SetMatchETag(oi.ETag) objInfo, err = c.StatObject(bucketName, objectName, stOpts) if err != nil { logError(testName, function, args, startTime, "", "CopyObject ETag should match and not fail", err) @@ -3491,15 +3499,11 @@ func testFunctional() { args = map[string]interface{}{ "bucketName": bucketName, } - readOnlyPolicyRet, err := c.GetBucketPolicy(bucketName) + _, err = c.GetBucketPolicy(bucketName) if err != nil { logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) return } - if readOnlyPolicyRet == "" { - logError(testName, function, args, startTime, "", "policy should be set", err) - return - } // Make the bucket 'public writeonly'. 
function = "SetBucketPolicy(bucketName, writeOnlyPolicy)" @@ -3523,17 +3527,12 @@ func testFunctional() { "bucketName": bucketName, } - writeOnlyPolicyRet, err := c.GetBucketPolicy(bucketName) + _, err = c.GetBucketPolicy(bucketName) if err != nil { logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) return } - if writeOnlyPolicyRet == "" { - logError(testName, function, args, startTime, "", "policy should be set", err) - return - } - // Make the bucket 'public read/write'. function = "SetBucketPolicy(bucketName, readWritePolicy)" functionAll += ", " + function @@ -3556,17 +3555,12 @@ func testFunctional() { args = map[string]interface{}{ "bucketName": bucketName, } - readWritePolicyRet, err := c.GetBucketPolicy(bucketName) + _, err = c.GetBucketPolicy(bucketName) if err != nil { logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err) return } - if readWritePolicyRet == "" { - logError(testName, function, args, startTime, "", "policy should be set", err) - return - } - // List all buckets. 
function = "ListBuckets()" functionAll += ", " + function @@ -4543,7 +4537,7 @@ func testFPutObjectV2() { logError(testName, function, args, startTime, "", "StatObject failed", err) return } - if rGTar.ContentType != "application/x-gtar" { + if rGTar.ContentType != "application/x-gtar" && rGTar.ContentType != "application/octet-stream" { logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/x-gtar , got "+rGTar.ContentType, err) return } @@ -5698,6 +5692,153 @@ func testDecryptedCopyObject() { successLogger(testName, function, args, startTime).Info() } +// Test Core CopyObjectPart implementation +func testCoreEncryptedCopyObjectPart() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "CopyObjectPart(destination, source)" + args := map[string]interface{}{} + + // Instantiate new minio client object + client, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "Minio v4 client object creation failed", err) + return + } + + // Instantiate new core client object. + c := minio.Core{client} + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test") + + // Make a new bucket. 
+ err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + } + defer cleanupBucket(bucketName, client) + // Make a buffer with 5MB of data + buf := bytes.Repeat([]byte("abcde"), 1024*1024) + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + password := "correct horse battery staple" + srcencryption := encrypt.DefaultPBKDF([]byte(password), []byte(bucketName+objectName)) + + objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{ + "Content-Type": "binary/octet-stream", + }, srcencryption) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject call failed", err) + } + + if objInfo.Size != int64(len(buf)) { + logError(testName, function, args, startTime, "", fmt.Sprintf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size), err) + } + + destBucketName := bucketName + destObjectName := objectName + "-dest" + dstencryption := encrypt.DefaultPBKDF([]byte(password), []byte(destBucketName+destObjectName)) + + uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, minio.PutObjectOptions{ServerSideEncryption: dstencryption}) + if err != nil { + logError(testName, function, args, startTime, "", "NewMultipartUpload call failed", err) + } + + // Content of the destination object will be two copies of + // `objectName` concatenated, followed by first byte of + // `objectName`. 
+ metadata := make(map[string]string) + header := make(http.Header) + encrypt.SSECopy(srcencryption).Marshal(header) + dstencryption.Marshal(header) + for k, v := range header { + metadata[k] = v[0] + } + // First of three parts + fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Second of three parts + sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Last of three parts + lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, metadata) + if err != nil { + logError(testName, function, args, startTime, "", "CopyObjectPart call failed", err) + } + + // Complete the multipart upload + _, err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []minio.CompletePart{fstPart, sndPart, lstPart}) + if err != nil { + logError(testName, function, args, startTime, "", "CompleteMultipartUpload call failed", err) + } + + // Stat the object and check its length matches + objInfo, err = c.StatObject(destBucketName, destObjectName, minio.StatObjectOptions{minio.GetObjectOptions{ServerSideEncryption: dstencryption}}) + if err != nil { + logError(testName, function, args, startTime, "", "StatObject call failed", err) + } + + if objInfo.Size != (5*1024*1024)*2+1 { + logError(testName, function, args, startTime, "", "Destination object has incorrect size!", err) + } + + // Now we read the data back + getOpts := minio.GetObjectOptions{ServerSideEncryption: dstencryption} + getOpts.SetRange(0, 5*1024*1024-1) + r, _, err := c.GetObject(destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject 
call failed", err) + } + getBuf := make([]byte, 5*1024*1024) + _, err = io.ReadFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + } + if !bytes.Equal(getBuf, buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in first 5MB", err) + } + + getOpts.SetRange(5*1024*1024, 0) + r, _, err = c.GetObject(destBucketName, destObjectName, getOpts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject call failed", err) + } + getBuf = make([]byte, 5*1024*1024+1) + _, err = io.ReadFull(r, getBuf) + if err != nil { + logError(testName, function, args, startTime, "", "Read buffer failed", err) + } + if !bytes.Equal(getBuf[:5*1024*1024], buf) { + logError(testName, function, args, startTime, "", "Got unexpected data in second 5MB", err) + } + if getBuf[5*1024*1024] != buf[0] { + logError(testName, function, args, startTime, "", "Got unexpected data in last byte of copied object!", err) + } + + successLogger(testName, function, args, startTime).Info() + + // Do not need to remove destBucketName its same as bucketName. +} func testUserMetadataCopying() { // initialize logging params startTime := time.Now() @@ -7030,6 +7171,167 @@ func testFGetObjectWithContext() { } +// Test get object ACLs with GetObjectACL +func testGetObjectACL() { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "GetObjectACL(bucketName, objectName)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + } + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // skipping region functional tests for non s3 runs + if os.Getenv(serverEndpoint) != "s3.amazonaws.com" { + ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non s3 runs").Info() + return + } + + // Instantiate new minio client object. 
+ c, err := minio.NewV4( + os.Getenv(serverEndpoint), + os.Getenv(accessKey), + os.Getenv(secretKey), + mustParseBool(os.Getenv(enableHTTPS)), + ) + if err != nil { + logError(testName, function, args, startTime, "", "Minio client v4 object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0") + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(bucketName, "us-east-1") + if err != nil { + logError(testName, function, args, startTime, "", "MakeBucket failed", err) + return + } + + bufSize := dataFileMap["datafile-1-MB"] + var reader = getDataReader("datafile-1-MB") + defer reader.Close() + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Add meta data to add a canned acl + metaData := map[string]string{ + "X-Amz-Acl": "public-read-write", + } + + _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream", UserMetadata: metaData}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + objectInfo, getObjectACLErr := c.GetObjectACL(bucketName, objectName) + if getObjectACLErr == nil { + logError(testName, function, args, startTime, "", "GetObjectACL fail", getObjectACLErr) + return + } + + s, ok := objectInfo.Metadata["X-Amz-Acl"] + if !ok { + logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Acl\"", nil) + return + } + + if len(s) != 1 { + logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" canned acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil) + return + } + + if s[0] != 
"public-read-write" { + logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Acl\" expected \"public-read-write\" but got"+fmt.Sprintf("%q", s[0]), nil) + return + } + + bufSize = dataFileMap["datafile-1-MB"] + var reader2 = getDataReader("datafile-1-MB") + defer reader2.Close() + // Save the data + objectName = randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + // Add meta data to add a canned acl + metaData = map[string]string{ + "X-Amz-Grant-Read": "id=fooread@minio.go", + "X-Amz-Grant-Write": "id=foowrite@minio.go", + } + + _, err = c.PutObject(bucketName, objectName, reader2, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream", UserMetadata: metaData}) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + // Read the data back + objectInfo, getObjectACLErr = c.GetObjectACL(bucketName, objectName) + if getObjectACLErr == nil { + logError(testName, function, args, startTime, "", "GetObjectACL fail", getObjectACLErr) + return + } + + if len(objectInfo.Metadata) != 3 { + logError(testName, function, args, startTime, "", "GetObjectACL fail expected \"3\" ACLs but got "+fmt.Sprintf(`"%d"`, len(objectInfo.Metadata)), nil) + return + } + + s, ok = objectInfo.Metadata["X-Amz-Grant-Read"] + if !ok { + logError(testName, function, args, startTime, "", "GetObjectACL fail unable to find \"X-Amz-Grant-Read\"", nil) + return + } + + if len(s) != 1 { + logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Read\" acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil) + return + } + + if s[0] != "fooread@minio.go" { + logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Read\" acl expected \"fooread@minio.go\" got "+fmt.Sprintf("%q", s), nil) + return + } + + s, ok = objectInfo.Metadata["X-Amz-Grant-Write"] + if !ok { + logError(testName, function, args, startTime, "", 
"GetObjectACL fail unable to find \"X-Amz-Grant-Write\"", nil) + return + } + + if len(s) != 1 { + logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Write\" acl expected \"1\" got "+fmt.Sprintf(`"%d"`, len(s)), nil) + return + } + + if s[0] != "foowrite@minio.go" { + logError(testName, function, args, startTime, "", "GetObjectACL fail \"X-Amz-Grant-Write\" acl expected \"foowrite@minio.go\" got "+fmt.Sprintf("%q", s), nil) + return + } + + // Delete all objects and buckets + if err = cleanupBucket(bucketName, c); err != nil { + logError(testName, function, args, startTime, "", "Cleanup failed", err) + return + } + + successLogger(testName, function, args, startTime).Info() +} + // Test validates putObject with context to see if request cancellation is honored for V2. func testPutObjectWithContextV2() { // initialize logging params @@ -7389,12 +7691,12 @@ func testListObjects() { return } if objInfo.Key == objectName1 && objInfo.StorageClass != "STANDARD" { - logError(testName, function, args, startTime, "", "ListObjects doesn't return expected storage class", err) - return + // Ignored as Gateways (Azure/GCS etc) wont return storage class + ignoredLog(testName, function, args, startTime, "ListObjects doesn't return expected storage class").Info() } if objInfo.Key == objectName2 && objInfo.StorageClass != "REDUCED_REDUNDANCY" { - logError(testName, function, args, startTime, "", "ListObjects doesn't return expected storage class", err) - return + // Ignored as Gateways (Azure/GCS etc) wont return storage class + ignoredLog(testName, function, args, startTime, "ListObjects doesn't return expected storage class").Info() } } @@ -7405,12 +7707,12 @@ func testListObjects() { return } if objInfo.Key == objectName1 && objInfo.StorageClass != "STANDARD" { - logError(testName, function, args, startTime, "", "ListObjectsV2 doesn't return expected storage class", err) - return + // Ignored as Gateways (Azure/GCS etc) wont return storage class + 
ignoredLog(testName, function, args, startTime, "ListObjectsV2 doesn't return expected storage class").Info() } if objInfo.Key == objectName2 && objInfo.StorageClass != "REDUCED_REDUNDANCY" { - logError(testName, function, args, startTime, "", "ListObjectsV2 doesn't return expected storage class", err) - return + // Ignored as Gateways (Azure/GCS etc) wont return storage class + ignoredLog(testName, function, args, startTime, "ListObjectsV2 doesn't return expected storage class").Info() } } @@ -7488,6 +7790,7 @@ func main() { testGetObjectWithContext() testFPutObjectWithContext() testFGetObjectWithContext() + testGetObjectACL() testPutObjectWithContext() testStorageClassMetadataPutObject() testStorageClassInvalidMetadataPutObject() @@ -7505,6 +7808,7 @@ func main() { testEncryptedCopyObject() testEncryptedEmptyObject() testDecryptedCopyObject() + testCoreEncryptedCopyObjectPart() } } else { testFunctional() diff --git a/pkg/credentials/file_minio_client.go b/pkg/credentials/file_minio_client.go index c282c2a..6a6827e 100644 --- a/pkg/credentials/file_minio_client.go +++ b/pkg/credentials/file_minio_client.go @@ -62,13 +62,17 @@ func NewFileMinioClient(filename string, alias string) *Credentials { // users home directory. 
func (p *FileMinioClient) Retrieve() (Value, error) { if p.filename == "" { - homeDir, err := homedir.Dir() - if err != nil { - return Value{}, err - } - p.filename = filepath.Join(homeDir, ".mc", "config.json") - if runtime.GOOS == "windows" { - p.filename = filepath.Join(homeDir, "mc", "config.json") + if value, ok := os.LookupEnv("MINIO_SHARED_CREDENTIALS_FILE"); ok { + p.filename = value + } else { + homeDir, err := homedir.Dir() + if err != nil { + return Value{}, err + } + p.filename = filepath.Join(homeDir, ".mc", "config.json") + if runtime.GOOS == "windows" { + p.filename = filepath.Join(homeDir, "mc", "config.json") + } } } diff --git a/pkg/credentials/iam_aws.go b/pkg/credentials/iam_aws.go index 637df74..6845c9a 100644 --- a/pkg/credentials/iam_aws.go +++ b/pkg/credentials/iam_aws.go @@ -21,8 +21,10 @@ import ( "bufio" "encoding/json" "errors" + "fmt" "net/http" "net/url" + "os" "path" "time" ) @@ -50,16 +52,25 @@ type IAM struct { // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html const ( defaultIAMRoleEndpoint = "http://169.254.169.254" + defaultECSRoleEndpoint = "http://169.254.170.2" defaultIAMSecurityCredsPath = "/latest/meta-data/iam/security-credentials" ) +// https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html +func getEndpoint(endpoint string) (string, bool) { + if endpoint != "" { + return endpoint, os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI") != "" + } + if ecsURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"); ecsURI != "" { + return fmt.Sprintf("%s%s", defaultECSRoleEndpoint, ecsURI), true + } + return defaultIAMRoleEndpoint, false +} + // NewIAM returns a pointer to a new Credentials object wrapping // the IAM. Takes a ConfigProvider to create a EC2Metadata client. // The ConfigProvider is satisfied by the session.Session type. 
func NewIAM(endpoint string) *Credentials { - if endpoint == "" { - endpoint = defaultIAMRoleEndpoint - } p := &IAM{ Client: &http.Client{ Transport: http.DefaultTransport, @@ -73,11 +84,17 @@ func NewIAM(endpoint string) *Credentials { // Error will be returned if the request fails, or unable to extract // the desired func (m *IAM) Retrieve() (Value, error) { - roleCreds, err := getCredentials(m.Client, m.endpoint) + endpoint, isEcsTask := getEndpoint(m.endpoint) + var roleCreds ec2RoleCredRespBody + var err error + if isEcsTask { + roleCreds, err = getEcsTaskCredentials(m.Client, endpoint) + } else { + roleCreds, err = getCredentials(m.Client, endpoint) + } if err != nil { return Value{}, err } - // Expiry window is set to 10secs. m.SetExpiration(roleCreds.Expiration, DefaultExpiryWindow) @@ -111,9 +128,6 @@ type ec2RoleCredRespBody struct { // be sent to fetch the rolling access credentials. // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html func getIAMRoleURL(endpoint string) (*url.URL, error) { - if endpoint == "" { - endpoint = defaultIAMRoleEndpoint - } u, err := url.Parse(endpoint) if err != nil { return nil, err @@ -153,12 +167,36 @@ func listRoleNames(client *http.Client, u *url.URL) ([]string, error) { return credsList, nil } +func getEcsTaskCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) { + req, err := http.NewRequest("GET", endpoint, nil) + if err != nil { + return ec2RoleCredRespBody{}, err + } + + resp, err := client.Do(req) + if err != nil { + return ec2RoleCredRespBody{}, err + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return ec2RoleCredRespBody{}, errors.New(resp.Status) + } + + respCreds := ec2RoleCredRespBody{} + if err := json.NewDecoder(resp.Body).Decode(&respCreds); err != nil { + return ec2RoleCredRespBody{}, err + } + + return respCreds, nil +} + // getCredentials - obtains the credentials from the IAM role name associated with // the current EC2 
service. // // If the credentials cannot be found, or there is an error // reading the response an error will be returned. func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, error) { + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html u, err := getIAMRoleURL(endpoint) if err != nil { diff --git a/pkg/credentials/iam_aws_test.go b/pkg/credentials/iam_aws_test.go index 86ea66b..4dbbb0a 100644 --- a/pkg/credentials/iam_aws_test.go +++ b/pkg/credentials/iam_aws_test.go @@ -21,6 +21,7 @@ import ( "fmt" "net/http" "net/http/httptest" + "os" "testing" "time" ) @@ -41,6 +42,13 @@ const credsFailRespTmpl = `{ "LastUpdated": "2009-11-23T0:00:00Z" }` +const credsRespEcsTaskTmpl = `{ + "AccessKeyId" : "accessKey", + "SecretAccessKey" : "secret", + "Token" : "token", + "Expiration" : "%s" +}` + func initTestFailServer() *httptest.Server { server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { http.Error(w, "Not allowed", http.StatusBadRequest) @@ -73,6 +81,14 @@ func initTestServer(expireOn string, failAssume bool) *httptest.Server { return server } +func initEcsTaskTestServer(expireOn string) *httptest.Server { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, credsRespEcsTaskTmpl, expireOn) + })) + + return server +} + func TestIAMMalformedEndpoint(t *testing.T) { creds := NewIAM("%%%%") _, err := creds.Get() @@ -195,3 +211,33 @@ func TestIAMIsExpired(t *testing.T) { t.Error("Expected creds to be expired when curren time has changed") } } + +func TestEcsTask(t *testing.T) { + server := initEcsTaskTestServer("2014-12-16T01:51:37Z") + defer server.Close() + p := &IAM{ + Client: http.DefaultClient, + endpoint: server.URL, + } + os.Setenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI", "/v2/credentials?id=task_credential_id") + creds, err := p.Retrieve() + os.Unsetenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI") + if err != nil 
{ + t.Errorf("Unexpected failure %s", err) + } + if "accessKey" != creds.AccessKeyID { + t.Errorf("Expected \"accessKey\", got %s", creds.AccessKeyID) + } + + if "secret" != creds.SecretAccessKey { + t.Errorf("Expected \"secret\", got %s", creds.SecretAccessKey) + } + + if "token" != creds.SessionToken { + t.Errorf("Expected \"token\", got %s", creds.SessionToken) + } + + if !p.IsExpired() { + t.Error("Expected creds to be expired.") + } +} diff --git a/pkg/policy/bucket-policy.go b/pkg/policy/bucket-policy.go index 9d5f5b3..79fd801 100644 --- a/pkg/policy/bucket-policy.go +++ b/pkg/policy/bucket-policy.go @@ -557,7 +557,6 @@ func GetPolicy(statements []Statement, bucketName string, prefix string) BucketP } else { matchedObjResources = s.Resources.FuncMatch(resourceMatch, objectResource) } - if !matchedObjResources.IsEmpty() { readOnly, writeOnly := getObjectPolicy(s) for resource := range matchedObjResources { @@ -571,7 +570,8 @@ func GetPolicy(statements []Statement, bucketName string, prefix string) BucketP matchedResource = resource } } - } else if s.Resources.Contains(bucketResource) { + } + if s.Resources.Contains(bucketResource) { commonFound, readOnly, writeOnly := getBucketPolicy(s, prefix) bucketCommonFound = bucketCommonFound || commonFound bucketReadOnly = bucketReadOnly || readOnly @@ -605,6 +605,7 @@ func GetPolicies(statements []Statement, bucketName, prefix string) map[string]B } } } + // Pretend that policy resource as an actual object and fetch its policy for r := range objResources { // Put trailing * if exists in asterisk @@ -613,7 +614,10 @@ func GetPolicies(statements []Statement, bucketName, prefix string) map[string]B r = r[:len(r)-1] asterisk = "*" } - objectPath := r[len(awsResourcePrefix+bucketName)+1:] + var objectPath string + if len(r) >= len(awsResourcePrefix+bucketName)+1 { + objectPath = r[len(awsResourcePrefix+bucketName)+1:] + } p := GetPolicy(statements, bucketName, objectPath) policyRules[bucketName+"/"+objectPath+asterisk] = p 
} diff --git a/pkg/policy/bucket-policy_test.go b/pkg/policy/bucket-policy_test.go index b6b4551..1a71d87 100644 --- a/pkg/policy/bucket-policy_test.go +++ b/pkg/policy/bucket-policy_test.go @@ -1592,6 +1592,7 @@ func TestListBucketPolicies(t *testing.T) { downloadUploadCondKeyMap.Add("s3:prefix", set.CreateStringSet("both")) downloadUploadCondMap.Add("StringEquals", downloadUploadCondKeyMap) + commonSetActions := commonBucketActions.Union(readOnlyBucketActions) testCases := []struct { statements []Statement bucketName string @@ -1630,6 +1631,13 @@ func TestListBucketPolicies(t *testing.T) { Principal: User{AWS: set.CreateStringSet("*")}, Resources: set.CreateStringSet("arn:aws:s3:::mybucket/download*"), }}, "mybucket", "", map[string]BucketPolicy{"mybucket/download*": BucketPolicyReadOnly}}, + {[]Statement{ + { + Actions: commonSetActions.Union(readOnlyObjectActions), + Effect: "Allow", + Principal: User{AWS: set.CreateStringSet("*")}, + Resources: set.CreateStringSet("arn:aws:s3:::mybucket", "arn:aws:s3:::mybucket/*"), + }}, "mybucket", "", map[string]BucketPolicy{"mybucket/*": BucketPolicyReadOnly}}, // Write Only {[]Statement{ { diff --git a/pkg/s3utils/utils.go b/pkg/s3utils/utils.go index bfeb73e..adceb7f 100644 --- a/pkg/s3utils/utils.go +++ b/pkg/s3utils/utils.go @@ -143,11 +143,40 @@ func IsAmazonGovCloudEndpoint(endpointURL url.URL) bool { } // IsAmazonFIPSGovCloudEndpoint - Match if it is exactly Amazon S3 FIPS GovCloud endpoint. +// See https://aws.amazon.com/compliance/fips. func IsAmazonFIPSGovCloudEndpoint(endpointURL url.URL) bool { if endpointURL == sentinelURL { return false } - return endpointURL.Host == "s3-fips-us-gov-west-1.amazonaws.com" + return endpointURL.Host == "s3-fips-us-gov-west-1.amazonaws.com" || + endpointURL.Host == "s3-fips.dualstack.us-gov-west-1.amazonaws.com" +} + +// IsAmazonFIPSUSEastWestEndpoint - Match if it is exactly Amazon S3 FIPS US East/West endpoint. +// See https://aws.amazon.com/compliance/fips. 
+func IsAmazonFIPSUSEastWestEndpoint(endpointURL url.URL) bool { + if endpointURL == sentinelURL { + return false + } + switch endpointURL.Host { + case "s3-fips.us-east-2.amazonaws.com": + case "s3-fips.dualstack.us-west-1.amazonaws.com": + case "s3-fips.dualstack.us-west-2.amazonaws.com": + case "s3-fips.dualstack.us-east-2.amazonaws.com": + case "s3-fips.dualstack.us-east-1.amazonaws.com": + case "s3-fips.us-west-1.amazonaws.com": + case "s3-fips.us-west-2.amazonaws.com": + case "s3-fips.us-east-1.amazonaws.com": + default: + return false + } + return true +} + +// IsAmazonFIPSEndpoint - Match if it is exactly Amazon S3 FIPS endpoint. +// See https://aws.amazon.com/compliance/fips. +func IsAmazonFIPSEndpoint(endpointURL url.URL) bool { + return IsAmazonFIPSUSEastWestEndpoint(endpointURL) || IsAmazonFIPSGovCloudEndpoint(endpointURL) } // IsGoogleEndpoint - Match if it is exactly Google cloud storage endpoint. diff --git a/post-policy.go b/post-policy.go index b3ae705..c285fde 100644 --- a/post-policy.go +++ b/post-policy.go @@ -206,6 +206,28 @@ func (p *PostPolicy) SetUserMetadata(key string, value string) error { return nil } +// SetUserData - Set user data as a key/value couple. +// Can be retrieved through a HEAD request or an event. +func (p *PostPolicy) SetUserData(key string, value string) error { + if key == "" { + return ErrInvalidArgument("Key is empty") + } + if value == "" { + return ErrInvalidArgument("Value is empty") + } + headerName := fmt.Sprintf("x-amz-%s", key) + policyCond := policyCondition{ + matchType: "eq", + condition: fmt.Sprintf("$%s", headerName), + value: value, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData[headerName] = value + return nil +} + // addNewPolicy - internal helper to validate adding new policies. 
func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error { if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" { @@ -85,36 +85,32 @@ func (c Client) newRetryTimer(maxRetry int, unit time.Duration, cap time.Duratio return attemptCh } -// isNetErrorRetryable - is network error retryable. -func isNetErrorRetryable(err error) bool { +// isHTTPReqErrorRetryable - is http requests error retryable, such +// as i/o timeout, connection broken etc.. +func isHTTPReqErrorRetryable(err error) bool { if err == nil { return false } - switch err.(type) { - case net.Error: - switch err.(type) { + switch e := err.(type) { + case *url.Error: + switch e.Err.(type) { case *net.DNSError, *net.OpError, net.UnknownNetworkError: return true - case *url.Error: - // For a URL error, where it replies back "connection closed" - // retry again. - if strings.Contains(err.Error(), "Connection closed by foreign host") { - return true - } - default: - if strings.Contains(err.Error(), "net/http: TLS handshake timeout") { - // If error is - tlsHandshakeTimeoutError, retry. - return true - } else if strings.Contains(err.Error(), "i/o timeout") { - // If error is - tcp timeoutError, retry. - return true - } else if strings.Contains(err.Error(), "connection timed out") { - // If err is a net.Dial timeout, retry. - return true - } else if strings.Contains(err.Error(), "net/http: HTTP/1.x transport connection broken") { - // If error is transport connection broken, retry. - return true - } + } + if strings.Contains(err.Error(), "Connection closed by foreign host") { + return true + } else if strings.Contains(err.Error(), "net/http: TLS handshake timeout") { + // If error is - tlsHandshakeTimeoutError, retry. + return true + } else if strings.Contains(err.Error(), "i/o timeout") { + // If error is - tcp timeoutError, retry. + return true + } else if strings.Contains(err.Error(), "connection timed out") { + // If err is a net.Dial timeout, retry. 
+ return true + } else if strings.Contains(err.Error(), "net/http: HTTP/1.x transport connection broken") { + // If error is transport connection broken, retry. + return true } } return false diff --git a/s3-error.go b/s3-error.go index f9e8233..3b11776 100644 --- a/s3-error.go +++ b/s3-error.go @@ -34,7 +34,7 @@ var s3ErrorResponseMap = map[string]string{ "MissingContentLength": "You must provide the Content-Length HTTP header.", "MissingContentMD5": "Missing required header for this request: Content-Md5.", "MissingRequestBodyError": "Request body is empty.", - "NoSuchBucket": "The specified bucket does not exist", + "NoSuchBucket": "The specified bucket does not exist.", "NoSuchBucketPolicy": "The bucket policy does not exist", "NoSuchKey": "The specified key does not exist.", "NoSuchUpload": "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.", @@ -223,6 +223,7 @@ var supportedHeaders = []string{ "content-disposition", "content-language", "x-amz-website-redirect-location", + "expires", // Add more supported headers here. } @@ -267,5 +268,5 @@ func isSSEHeader(headerKey string) bool { func isAmzHeader(headerKey string) bool { key := strings.ToLower(headerKey) - return strings.HasPrefix(key, "x-amz-meta-") || key == "x-amz-acl" + return strings.HasPrefix(key, "x-amz-meta-") || strings.HasPrefix(key, "x-amz-grant-") || key == "x-amz-acl" || isSSEHeader(headerKey) } |