summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorFélix Sipma <felix+debian@gueux.org>2018-01-05 11:09:44 +0100
committerFélix Sipma <felix+debian@gueux.org>2018-01-05 11:09:44 +0100
commit9aa20edb43ab70f1865d4d1ae680939faa46c8b7 (patch)
tree66742c50531fcce59814c2e55f8fa1f0a2e5463f
parentfd69b3bb0c26856d0938842c0e4e7d3ff959ca3d (diff)
parent2e53196f9027ebb270b9e9a251ad39383a500c8f (diff)
Update upstream source from tag 'upstream/4.0.5'
Update to upstream version '4.0.5' with Debian dir fc9f5488be66217572c92dce9419d3a3fba2cc24
-rw-r--r--.gitignore3
-rw-r--r--.travis.yml20
-rw-r--r--MAINTAINERS.md28
-rw-r--r--Makefile17
-rw-r--r--NOTICE2
-rw-r--r--README.md61
-rw-r--r--README_zh_CN.md246
-rw-r--r--api-compose-object.go141
-rw-r--r--api-compose-object_test.go3
-rw-r--r--api-datatypes.go5
-rw-r--r--api-error-response.go71
-rw-r--r--api-error-response_test.go108
-rw-r--r--api-get-object-context.go26
-rw-r--r--api-get-object-file.go35
-rw-r--r--api-get-object.go119
-rw-r--r--api-get-options.go (renamed from request-headers.go)67
-rw-r--r--api-get-policy.go12
-rw-r--r--api-list.go40
-rw-r--r--api-notification.go27
-rw-r--r--api-presigned.go82
-rw-r--r--api-put-bucket.go45
-rw-r--r--api-put-object-common.go57
-rw-r--r--api-put-object-context.go39
-rw-r--r--api-put-object-copy.go3
-rw-r--r--api-put-object-encrypted.go20
-rw-r--r--api-put-object-file-context.go64
-rw-r--r--api-put-object-file.go51
-rw-r--r--api-put-object-multipart.go139
-rw-r--r--api-put-object-streaming.go117
-rw-r--r--api-put-object.go328
-rw-r--r--api-put-object_test.go53
-rw-r--r--api-remove.go46
-rw-r--r--api-s3-datatypes.go5
-rw-r--r--api-stat.go42
-rw-r--r--api.go93
-rw-r--r--api_functional_v2_test.go1470
-rw-r--r--api_functional_v4_test.go2410
-rw-r--r--api_unit_test.go166
-rw-r--r--appveyor.yml3
-rw-r--r--bucket-cache.go43
-rw-r--r--bucket-cache_test.go9
-rw-r--r--bucket-notification.go3
-rw-r--r--constants.go5
-rw-r--r--core.go67
-rw-r--r--core_test.go464
-rw-r--r--docs/API.md781
-rw-r--r--docs/checker.go.template21
-rw-r--r--docs/validator.go227
-rw-r--r--docs/zh_CN/API.md1820
-rw-r--r--docs/zh_CN/CONTRIBUTING.md22
-rw-r--r--examples/minio/listenbucketnotification.go3
-rw-r--r--examples/s3/bucketexists.go3
-rw-r--r--examples/s3/composeobject.go24
-rw-r--r--examples/s3/copyobject.go8
-rw-r--r--examples/s3/fgetobject-context.go54
-rw-r--r--examples/s3/fgetobject.go5
-rw-r--r--examples/s3/fputencrypted-object.go80
-rw-r--r--examples/s3/fputobject-context.go53
-rw-r--r--examples/s3/fputobject.go7
-rw-r--r--examples/s3/get-encrypted-object.go3
-rw-r--r--examples/s3/getbucketnotification.go3
-rw-r--r--examples/s3/getbucketpolicy.go3
-rw-r--r--examples/s3/getobject-context.go73
-rw-r--r--examples/s3/getobject.go5
-rw-r--r--examples/s3/listbucketpolicies.go3
-rw-r--r--examples/s3/listbuckets.go3
-rw-r--r--examples/s3/listincompleteuploads.go3
-rw-r--r--examples/s3/listobjects-N.go3
-rw-r--r--examples/s3/listobjects.go3
-rw-r--r--examples/s3/listobjectsV2.go3
-rw-r--r--examples/s3/makebucket.go3
-rw-r--r--examples/s3/presignedgetobject.go3
-rw-r--r--examples/s3/presignedheadobject.go54
-rw-r--r--examples/s3/presignedpostpolicy.go3
-rw-r--r--examples/s3/presignedputobject.go3
-rw-r--r--examples/s3/put-encrypted-object.go5
-rw-r--r--examples/s3/putobject-context.go68
-rw-r--r--examples/s3/putobject-getobject-sse.go20
-rw-r--r--examples/s3/putobject-progress.go7
-rw-r--r--examples/s3/putobject-s3-accelerate.go12
-rw-r--r--examples/s3/putobject-streaming.go5
-rw-r--r--examples/s3/putobject.go9
-rw-r--r--examples/s3/removeallbucketnotification.go3
-rw-r--r--examples/s3/removebucket.go3
-rw-r--r--examples/s3/removeincompleteupload.go3
-rw-r--r--examples/s3/removeobject.go3
-rw-r--r--examples/s3/removeobjects.go12
-rw-r--r--examples/s3/setbucketnotification.go3
-rw-r--r--examples/s3/setbucketpolicy.go3
-rw-r--r--examples/s3/statobject.go5
-rw-r--r--functional_tests.go6721
-rw-r--r--get-options_test.go (renamed from request-headers_test.go)11
-rw-r--r--hook-reader.go3
-rw-r--r--pkg/credentials/chain.go34
-rw-r--r--pkg/credentials/chain_test.go11
-rw-r--r--pkg/credentials/credentials.go2
-rw-r--r--pkg/credentials/credentials_test.go2
-rw-r--r--pkg/credentials/doc.go17
-rw-r--r--pkg/credentials/env_aws.go2
-rw-r--r--pkg/credentials/env_minio.go2
-rw-r--r--pkg/credentials/env_test.go2
-rw-r--r--pkg/credentials/file_aws_credentials.go4
-rw-r--r--pkg/credentials/file_minio_client.go4
-rw-r--r--pkg/credentials/file_test.go2
-rw-r--r--pkg/credentials/iam_aws.go17
-rw-r--r--pkg/credentials/iam_aws_test.go17
-rw-r--r--pkg/credentials/signature-type.go3
-rw-r--r--pkg/credentials/static.go2
-rw-r--r--pkg/credentials/static_test.go2
-rw-r--r--pkg/encrypt/cbc.go3
-rw-r--r--pkg/encrypt/interface.go3
-rw-r--r--pkg/encrypt/keys.go3
-rw-r--r--pkg/policy/bucket-policy-condition.go3
-rw-r--r--pkg/policy/bucket-policy-condition_test.go3
-rw-r--r--pkg/policy/bucket-policy.go3
-rw-r--r--pkg/policy/bucket-policy_test.go3
-rw-r--r--pkg/s3signer/request-signature-streaming.go7
-rw-r--r--pkg/s3signer/request-signature-streaming_test.go7
-rw-r--r--pkg/s3signer/request-signature-v2.go50
-rw-r--r--pkg/s3signer/request-signature-v2_test.go3
-rw-r--r--pkg/s3signer/request-signature-v4.go3
-rw-r--r--pkg/s3signer/request-signature_test.go3
-rw-r--r--pkg/s3signer/test-utils_test.go3
-rw-r--r--pkg/s3signer/utils.go3
-rw-r--r--pkg/s3signer/utils_test.go10
-rw-r--r--pkg/s3utils/utils.go13
-rw-r--r--pkg/s3utils/utils_test.go7
-rw-r--r--pkg/set/stringset.go3
-rw-r--r--pkg/set/stringset_test.go3
-rw-r--r--post-policy.go39
-rw-r--r--retry-continous.go17
-rw-r--r--retry.go3
-rw-r--r--s3-endpoints.go3
-rw-r--r--s3-error.go3
-rw-r--r--test-utils_test.go7
-rw-r--r--transport.go48
-rw-r--r--utils.go93
-rw-r--r--utils_test.go106
138 files changed, 12311 insertions, 5297 deletions
diff --git a/.gitignore b/.gitignore
index acf19db..fa967ab 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,3 @@
*~
-*.test \ No newline at end of file
+*.test
+validator
diff --git a/.travis.yml b/.travis.yml
index 066cbe8..4ae1ead 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -9,12 +9,22 @@ env:
- ARCH=i686
go:
-- 1.5.3
-- 1.6
- 1.7.4
-- 1.8
+- 1.8.x
+- 1.9.x
+- tip
+
+matrix:
+ fast_finish: true
+ allow_failures:
+ - go: tip
+
+addons:
+ apt:
+ packages:
+ - devscripts
script:
- diff -au <(gofmt -d .) <(printf "")
-- go vet ./...
-- go test -short -race -v ./...
+- diff -au <(licensecheck --check '.go$' --recursive --lines 0 * | grep -v -w 'Apache (v2.0)') <(printf "")
+- make
diff --git a/MAINTAINERS.md b/MAINTAINERS.md
index 6dbef62..1797307 100644
--- a/MAINTAINERS.md
+++ b/MAINTAINERS.md
@@ -5,15 +5,31 @@
Please go through this link [Maintainer Responsibility](https://gist.github.com/abperiasamy/f4d9b31d3186bbd26522)
### Making new releases
+Tag and sign your release commit, additionally this step requires you to have access to Minio's trusted private key.
+```sh
+$ export GNUPGHOME=/media/${USER}/minio/trusted
+$ git tag -s 4.0.0
+$ git push
+$ git push --tags
+```
-Edit `libraryVersion` constant in `api.go`.
+### Update version
+Once release has been made update `libraryVersion` constant in `api.go` to next to be released version.
-```
+```sh
$ grep libraryVersion api.go
- libraryVersion = "0.3.0"
+ libraryVersion = "4.0.1"
```
+Commit your changes
+```
+$ git commit -a -m "Update version for next release" --author "Minio Trusted <trusted@minio.io>"
+```
+
+### Announce
+Announce new release by adding release notes at https://github.com/minio/minio-go/releases from `trusted@minio.io` account. Release notes requires two sections `highlights` and `changelog`. Highlights is a bulleted list of salient features in this release and Changelog contains list of all commits since the last release.
+
+To generate `changelog`
+```sh
+$ git log --no-color --pretty=format:'-%d %s (%cr) <%an>' <last_release_tag>..<latest_release_tag>
```
-$ git tag 0.3.0
-$ git push --tags
-``` \ No newline at end of file
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..8e0dd25
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,17 @@
+all: checks
+
+checks:
+ @go get -u github.com/go-ini/ini/...
+ @go get -u github.com/mitchellh/go-homedir/...
+ @go get -u github.com/cheggaaa/pb/...
+ @go get -u github.com/sirupsen/logrus/...
+ @go get -u github.com/dustin/go-humanize/...
+ @go vet ./...
+ @SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 go test -race -v ./...
+ @SERVER_ENDPOINT=play.minio.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 go run functional_tests.go
+ @mkdir -p /tmp/examples && for i in $(echo examples/s3/*); do go build -o /tmp/examples/$(basename ${i:0:-3}) ${i}; done
+ @go get -u github.com/a8m/mark/...
+ @go get -u github.com/minio/cli/...
+ @go get -u golang.org/x/tools/cmd/goimports
+ @go get -u github.com/gernest/wow/...
+ @go build docs/validator.go && ./validator -m docs/API.md -t docs/checker.go.tpl
diff --git a/NOTICE b/NOTICE
new file mode 100644
index 0000000..c521791
--- /dev/null
+++ b/NOTICE
@@ -0,0 +1,2 @@
+minio-go
+Copyright 2015-2017 Minio, Inc. \ No newline at end of file
diff --git a/README.md b/README.md
index 4a91dc9..2dedc1a 100644
--- a/README.md
+++ b/README.md
@@ -1,19 +1,7 @@
-# Minio Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge)
+# Minio Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) [![Apache V2 License](http://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/minio/minio-go/blob/master/LICENSE)
The Minio Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage.
-**Supported cloud storage providers:**
-
-- AWS Signature Version 4
- - Amazon S3
- - Minio
-
-- AWS Signature Version 2
- - Google Cloud Storage (Compatibility Mode)
- - Openstack Swift + Swift3 middleware
- - Ceph Object Gateway
- - Riak CS
-
This quickstart guide will show you how to install the Minio client SDK, connect to Minio, and provide a walkthrough for a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://docs.minio.io/docs/golang-client-api-reference).
This document assumes that you have a working [Go development environment](https://docs.minio.io/docs/how-to-install-golang).
@@ -54,7 +42,8 @@ func main() {
log.Fatalln(err)
}
- log.Println("%v", minioClient) // minioClient is now setup
+ log.Printf("%#v\n", minioClient) // minioClient is now setup
+}
```
## Quick Start Example - File Uploader
@@ -105,7 +94,7 @@ func main() {
contentType := "application/zip"
// Upload the zip file with FPutObject
- n, err := minioClient.FPutObject(bucketName, objectName, filePath, contentType)
+ n, err := minioClient.FPutObject(bucketName, objectName, filePath, minio.PutObjectOptions{ContentType:contentType})
if err != nil {
log.Fatalln(err)
}
@@ -130,7 +119,6 @@ The full API Reference is available here.
* [Complete API Reference](https://docs.minio.io/docs/golang-client-api-reference)
### API Reference : Bucket Operations
-
* [`MakeBucket`](https://docs.minio.io/docs/golang-client-api-reference#MakeBucket)
* [`ListBuckets`](https://docs.minio.io/docs/golang-client-api-reference#ListBuckets)
* [`BucketExists`](https://docs.minio.io/docs/golang-client-api-reference#BucketExists)
@@ -140,27 +128,27 @@ The full API Reference is available here.
* [`ListIncompleteUploads`](https://docs.minio.io/docs/golang-client-api-reference#ListIncompleteUploads)
### API Reference : Bucket policy Operations
-
* [`SetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketPolicy)
* [`GetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketPolicy)
* [`ListBucketPolicies`](https://docs.minio.io/docs/golang-client-api-reference#ListBucketPolicies)
### API Reference : Bucket notification Operations
-
* [`SetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketNotification)
* [`GetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketNotification)
* [`RemoveAllBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#RemoveAllBucketNotification)
* [`ListenBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#ListenBucketNotification) (Minio Extension)
### API Reference : File Object Operations
-
* [`FPutObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject)
* [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject)
+* [`FPutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FPutObjectWithContext)
+* [`FGetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FGetObjectWithContext)
### API Reference : Object Operations
-
* [`GetObject`](https://docs.minio.io/docs/golang-client-api-reference#GetObject)
* [`PutObject`](https://docs.minio.io/docs/golang-client-api-reference#PutObject)
+* [`GetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#GetObjectWithContext)
+* [`PutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectWithContext)
* [`PutObjectStreaming`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectStreaming)
* [`StatObject`](https://docs.minio.io/docs/golang-client-api-reference#StatObject)
* [`CopyObject`](https://docs.minio.io/docs/golang-client-api-reference#CopyObject)
@@ -169,14 +157,13 @@ The full API Reference is available here.
* [`RemoveIncompleteUpload`](https://docs.minio.io/docs/golang-client-api-reference#RemoveIncompleteUpload)
### API Reference: Encrypted Object Operations
-
* [`GetEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#GetEncryptedObject)
* [`PutEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#PutEncryptedObject)
### API Reference : Presigned Operations
-
* [`PresignedGetObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedGetObject)
* [`PresignedPutObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPutObject)
+* [`PresignedHeadObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedHeadObject)
* [`PresignedPostPolicy`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPostPolicy)
### API Reference : Client custom settings
@@ -185,11 +172,9 @@ The full API Reference is available here.
* [`TraceOn`](http://docs.minio.io/docs/golang-client-api-reference#TraceOn)
* [`TraceOff`](http://docs.minio.io/docs/golang-client-api-reference#TraceOff)
-
## Full Examples
-#### Full Examples : Bucket Operations
-
+### Full Examples : Bucket Operations
* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go)
* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go)
* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go)
@@ -198,42 +183,43 @@ The full API Reference is available here.
* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go)
* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go)
-#### Full Examples : Bucket policy Operations
-
+### Full Examples : Bucket policy Operations
* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go)
* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go)
* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go)
-#### Full Examples : Bucket notification Operations
-
+### Full Examples : Bucket notification Operations
* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go)
* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go)
* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go)
* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (Minio Extension)
-#### Full Examples : File Object Operations
-
+### Full Examples : File Object Operations
* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go)
* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go)
+* [fputobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject-context.go)
+* [fgetobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject-context.go)
-#### Full Examples : Object Operations
-
+### Full Examples : Object Operations
* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go)
* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go)
+* [putobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject-context.go)
+* [getobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject-context.go)
* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go)
* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go)
* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go)
* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go)
* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go)
-#### Full Examples : Encrypted Object Operations
-
+### Full Examples : Encrypted Object Operations
* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go)
* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go)
+* [fput-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go)
-#### Full Examples : Presigned Operations
+### Full Examples : Presigned Operations
* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go)
+* [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go)
* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)
## Explore Further
@@ -242,9 +228,10 @@ The full API Reference is available here.
* [Go Music Player App Full Application Example](https://docs.minio.io/docs/go-music-player-app)
## Contribute
-
[Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md)
[![Build Status](https://travis-ci.org/minio/minio-go.svg)](https://travis-ci.org/minio/minio-go)
[![Build status](https://ci.appveyor.com/api/projects/status/1d05e6nvxcelmrak?svg=true)](https://ci.appveyor.com/project/harshavardhana/minio-go)
+## License
+This SDK is distributed under the [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0), see [LICENSE](./LICENSE) and [NOTICE](./NOTICE) for more information.
diff --git a/README_zh_CN.md b/README_zh_CN.md
new file mode 100644
index 0000000..5584f42
--- /dev/null
+++ b/README_zh_CN.md
@@ -0,0 +1,246 @@
+# 适用于与Amazon S3兼容云存储的Minio Go SDK [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge)
+
+Minio Go Client SDK提供了简单的API来访问任何与Amazon S3兼容的对象存储服务。
+
+**支持的云存储:**
+
+- AWS Signature Version 4
+ - Amazon S3
+ - Minio
+
+- AWS Signature Version 2
+ - Google Cloud Storage (兼容模式)
+ - Openstack Swift + Swift3 middleware
+ - Ceph Object Gateway
+ - Riak CS
+
+本文我们将学习如何安装Minio client SDK,连接到Minio,并提供一下文件上传的示例。对于完整的API以及示例,请参考[Go Client API Reference](https://docs.minio.io/docs/golang-client-api-reference)。
+
+本文假设你已经有 [Go开发环境](https://docs.minio.io/docs/how-to-install-golang)。
+
+## 从Github下载
+```sh
+go get -u github.com/minio/minio-go
+```
+
+## 初始化Minio Client
+Minio client需要以下4个参数来连接与Amazon S3兼容的对象存储。
+
+| 参数 | 描述|
+| :--- | :--- |
+| endpoint | 对象存储服务的URL |
+| accessKeyID | Access key是唯一标识你的账户的用户ID。 |
+| secretAccessKey | Secret key是你账户的密码。 |
+| secure | true代表使用HTTPS |
+
+
+```go
+package main
+
+import (
+ "github.com/minio/minio-go"
+ "log"
+)
+
+func main() {
+ endpoint := "play.minio.io:9000"
+ accessKeyID := "Q3AM3UQ867SPQQA43P2F"
+ secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+ useSSL := true
+
+ // 初使化 minio client对象。
+ minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ log.Printf("%#v\n", minioClient) // minioClient初使化成功
+}
+```
+
+## 示例-文件上传
+本示例连接到一个对象存储服务,创建一个存储桶并上传一个文件到存储桶中。
+
+我们在本示例中使用运行在 [https://play.minio.io:9000](https://play.minio.io:9000) 上的Minio服务,你可以用这个服务来开发和测试。示例中的访问凭据是公开的。
+
+### FileUploader.go
+```go
+package main
+
+import (
+ "github.com/minio/minio-go"
+ "log"
+)
+
+func main() {
+ endpoint := "play.minio.io:9000"
+ accessKeyID := "Q3AM3UQ867SPQQA43P2F"
+ secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+ useSSL := true
+
+ // 初使化minio client对象。
+ minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // 创建一个叫mymusic的存储桶。
+ bucketName := "mymusic"
+ location := "us-east-1"
+
+ err = minioClient.MakeBucket(bucketName, location)
+ if err != nil {
+ // 检查存储桶是否已经存在。
+ exists, err := minioClient.BucketExists(bucketName)
+ if err == nil && exists {
+ log.Printf("We already own %s\n", bucketName)
+ } else {
+ log.Fatalln(err)
+ }
+ }
+ log.Printf("Successfully created %s\n", bucketName)
+
+ // 上传一个zip文件。
+ objectName := "golden-oldies.zip"
+ filePath := "/tmp/golden-oldies.zip"
+ contentType := "application/zip"
+
+ // 使用FPutObject上传一个zip文件。
+ n, err := minioClient.FPutObject(bucketName, objectName, filePath, minio.PutObjectOptions{ContentType:contentType})
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ log.Printf("Successfully uploaded %s of size %d\n", objectName, n)
+}
+```
+
+### 运行FileUploader
+```sh
+go run file-uploader.go
+2016/08/13 17:03:28 Successfully created mymusic
+2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413
+
+mc ls play/mymusic/
+[2016-05-27 16:02:16 PDT] 17MiB golden-oldies.zip
+```
+
+## API文档
+完整的API文档在这里。
+* [完整API文档](https://docs.minio.io/docs/golang-client-api-reference)
+
+### API文档 : 操作存储桶
+* [`MakeBucket`](https://docs.minio.io/docs/golang-client-api-reference#MakeBucket)
+* [`ListBuckets`](https://docs.minio.io/docs/golang-client-api-reference#ListBuckets)
+* [`BucketExists`](https://docs.minio.io/docs/golang-client-api-reference#BucketExists)
+* [`RemoveBucket`](https://docs.minio.io/docs/golang-client-api-reference#RemoveBucket)
+* [`ListObjects`](https://docs.minio.io/docs/golang-client-api-reference#ListObjects)
+* [`ListObjectsV2`](https://docs.minio.io/docs/golang-client-api-reference#ListObjectsV2)
+* [`ListIncompleteUploads`](https://docs.minio.io/docs/golang-client-api-reference#ListIncompleteUploads)
+
+### API文档 : 存储桶策略
+* [`SetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketPolicy)
+* [`GetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketPolicy)
+* [`ListBucketPolicies`](https://docs.minio.io/docs/golang-client-api-reference#ListBucketPolicies)
+
+### API文档 : 存储桶通知
+* [`SetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketNotification)
+* [`GetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketNotification)
+* [`RemoveAllBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#RemoveAllBucketNotification)
+* [`ListenBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#ListenBucketNotification) (Minio Extension)
+
+### API文档 : 操作文件对象
+* [`FPutObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject)
+* [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject)
+* [`FPutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FPutObjectWithContext)
+* [`FGetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#FGetObjectWithContext)
+
+### API文档 : 操作对象
+* [`GetObject`](https://docs.minio.io/docs/golang-client-api-reference#GetObject)
+* [`PutObject`](https://docs.minio.io/docs/golang-client-api-reference#PutObject)
+* [`GetObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#GetObjectWithContext)
+* [`PutObjectWithContext`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectWithContext)
+* [`PutObjectStreaming`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectStreaming)
+* [`StatObject`](https://docs.minio.io/docs/golang-client-api-reference#StatObject)
+* [`CopyObject`](https://docs.minio.io/docs/golang-client-api-reference#CopyObject)
+* [`RemoveObject`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObject)
+* [`RemoveObjects`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObjects)
+* [`RemoveIncompleteUpload`](https://docs.minio.io/docs/golang-client-api-reference#RemoveIncompleteUpload)
+
+### API文档: 操作加密对象
+* [`GetEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#GetEncryptedObject)
+* [`PutEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#PutEncryptedObject)
+
+### API文档 : Presigned操作
+* [`PresignedGetObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedGetObject)
+* [`PresignedPutObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPutObject)
+* [`PresignedHeadObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedHeadObject)
+* [`PresignedPostPolicy`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPostPolicy)
+
+### API文档 : 客户端自定义设置
+* [`SetAppInfo`](http://docs.minio.io/docs/golang-client-api-reference#SetAppInfo)
+* [`SetCustomTransport`](http://docs.minio.io/docs/golang-client-api-reference#SetCustomTransport)
+* [`TraceOn`](http://docs.minio.io/docs/golang-client-api-reference#TraceOn)
+* [`TraceOff`](http://docs.minio.io/docs/golang-client-api-reference#TraceOff)
+
+## 完整示例
+
+### 完整示例 : 操作存储桶
+* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go)
+* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go)
+* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go)
+* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go)
+* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go)
+* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go)
+* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go)
+
+### 完整示例 : 存储桶策略
+* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go)
+* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go)
+* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go)
+
+### 完整示例 : 存储桶通知
+* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go)
+* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go)
+* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go)
+* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (Minio扩展)
+
+### 完整示例 : 操作文件对象
+* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go)
+* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go)
+* [fputobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject-context.go)
+* [fgetobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject-context.go)
+
+### 完整示例 : 操作对象
+* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go)
+* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go)
+* [putobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject-context.go)
+* [getobject-context.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject-context.go)
+* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go)
+* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go)
+* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go)
+* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go)
+* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go)
+
+### 完整示例 : 操作加密对象
+* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go)
+* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go)
+* [fput-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputencrypted-object.go)
+
+### 完整示例 : Presigned操作
+* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
+* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go)
+* [presignedheadobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedheadobject.go)
+* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)
+
+## 了解更多
+* [完整文档](https://docs.minio.io)
+* [Minio Go Client SDK API文档](https://docs.minio.io/docs/golang-client-api-reference)
+* [Go 音乐播放器完整示例](https://docs.minio.io/docs/go-music-player-app)
+
+## 贡献
+[贡献指南](https://github.com/minio/minio-go/blob/master/docs/zh_CN/CONTRIBUTING.md)
+
+[![Build Status](https://travis-ci.org/minio/minio-go.svg)](https://travis-ci.org/minio/minio-go)
+[![Build status](https://ci.appveyor.com/api/projects/status/1d05e6nvxcelmrak?svg=true)](https://ci.appveyor.com/project/harshavardhana/minio-go)
+
diff --git a/api-compose-object.go b/api-compose-object.go
index 6baf09e..81314e3 100644
--- a/api-compose-object.go
+++ b/api-compose-object.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,6 +18,7 @@
package minio
import (
+ "context"
"encoding/base64"
"fmt"
"net/http"
@@ -58,7 +60,7 @@ func (s *SSEInfo) getSSEHeaders(isCopySource bool) map[string]string {
return map[string]string{
"x-amz-" + cs + "server-side-encryption-customer-algorithm": s.algo,
"x-amz-" + cs + "server-side-encryption-customer-key": base64.StdEncoding.EncodeToString(s.key),
- "x-amz-" + cs + "server-side-encryption-customer-key-MD5": base64.StdEncoding.EncodeToString(sumMD5(s.key)),
+ "x-amz-" + cs + "server-side-encryption-customer-key-MD5": sumMD5Base64(s.key),
}
}
@@ -115,7 +117,7 @@ func NewDestinationInfo(bucket, object string, encryptSSEC *SSEInfo,
k = k[len("x-amz-meta-"):]
}
if _, ok := m[k]; ok {
- return d, fmt.Errorf("Cannot add both %s and x-amz-meta-%s keys as custom metadata", k, k)
+ return d, ErrInvalidArgument(fmt.Sprintf("Cannot add both %s and x-amz-meta-%s keys as custom metadata", k, k))
}
m[k] = v
}
@@ -243,13 +245,13 @@ func (s *SourceInfo) getProps(c Client) (size int64, etag string, userMeta map[s
// Get object info - need size and etag here. Also, decryption
// headers are added to the stat request if given.
var objInfo ObjectInfo
- rh := NewGetReqHeaders()
+ opts := StatObjectOptions{}
for k, v := range s.decryptKey.getSSEHeaders(false) {
- rh.Set(k, v)
+ opts.Set(k, v)
}
- objInfo, err = c.statObject(s.bucket, s.object, rh)
+ objInfo, err = c.statObject(context.Background(), s.bucket, s.object, opts)
if err != nil {
- err = fmt.Errorf("Could not stat object - %s/%s: %v", s.bucket, s.object, err)
+ err = ErrInvalidArgument(fmt.Sprintf("Could not stat object - %s/%s: %v", s.bucket, s.object, err))
} else {
size = objInfo.Size
etag = objInfo.ETag
@@ -265,10 +267,105 @@ func (s *SourceInfo) getProps(c Client) (size int64, etag string, userMeta map[s
return
}
+// Low level implementation of CopyObject API, supports only upto 5GiB worth of copy.
+func (c Client) copyObjectDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string,
+ metadata map[string]string) (ObjectInfo, error) {
+
+ // Build headers.
+ headers := make(http.Header)
+
+ // Set all the metadata headers.
+ for k, v := range metadata {
+ headers.Set(k, v)
+ }
+
+ // Set the source header
+ headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject))
+
+ // Send upload-part-copy request
+ resp, err := c.executeMethod(ctx, "PUT", requestMetadata{
+ bucketName: destBucket,
+ objectName: destObject,
+ customHeader: headers,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return ObjectInfo{}, err
+ }
+
+ // Check if we got an error response.
+ if resp.StatusCode != http.StatusOK {
+ return ObjectInfo{}, httpRespToErrorResponse(resp, srcBucket, srcObject)
+ }
+
+ cpObjRes := copyObjectResult{}
+ err = xmlDecoder(resp.Body, &cpObjRes)
+ if err != nil {
+ return ObjectInfo{}, err
+ }
+
+ objInfo := ObjectInfo{
+ Key: destObject,
+ ETag: strings.Trim(cpObjRes.ETag, "\""),
+ LastModified: cpObjRes.LastModified,
+ }
+ return objInfo, nil
+}
+
+func (c Client) copyObjectPartDo(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string,
+ partID int, startOffset int64, length int64, metadata map[string]string) (p CompletePart, err error) {
+
+ headers := make(http.Header)
+
+ // Set source
+ headers.Set("x-amz-copy-source", s3utils.EncodePath(srcBucket+"/"+srcObject))
+
+ if startOffset < 0 {
+ return p, ErrInvalidArgument("startOffset must be non-negative")
+ }
+
+ if length >= 0 {
+ headers.Set("x-amz-copy-source-range", fmt.Sprintf("bytes=%d-%d", startOffset, startOffset+length-1))
+ }
+
+ for k, v := range metadata {
+ headers.Set(k, v)
+ }
+
+ queryValues := make(url.Values)
+ queryValues.Set("partNumber", strconv.Itoa(partID))
+ queryValues.Set("uploadId", uploadID)
+
+ resp, err := c.executeMethod(ctx, "PUT", requestMetadata{
+ bucketName: destBucket,
+ objectName: destObject,
+ customHeader: headers,
+ queryValues: queryValues,
+ })
+ defer closeResponse(resp)
+ if err != nil {
+ return
+ }
+
+ // Check if we got an error response.
+ if resp.StatusCode != http.StatusOK {
+ return p, httpRespToErrorResponse(resp, destBucket, destObject)
+ }
+
+ // Decode copy-part response on success.
+ cpObjRes := copyObjectResult{}
+ err = xmlDecoder(resp.Body, &cpObjRes)
+ if err != nil {
+ return p, err
+ }
+ p.PartNumber, p.ETag = partID, cpObjRes.ETag
+ return p, nil
+}
+
// uploadPartCopy - helper function to create a part in a multipart
// upload via an upload-part-copy request
// https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadUploadPartCopy.html
-func (c Client) uploadPartCopy(bucket, object, uploadID string, partNumber int,
+func (c Client) uploadPartCopy(ctx context.Context, bucket, object, uploadID string, partNumber int,
headers http.Header) (p CompletePart, err error) {
// Build query parameters
@@ -277,7 +374,7 @@ func (c Client) uploadPartCopy(bucket, object, uploadID string, partNumber int,
urlValues.Set("uploadId", uploadID)
// Send upload-part-copy request
- resp, err := c.executeMethod("PUT", requestMetadata{
+ resp, err := c.executeMethod(ctx, "PUT", requestMetadata{
bucketName: bucket,
objectName: object,
customHeader: headers,
@@ -309,9 +406,9 @@ func (c Client) uploadPartCopy(bucket, object, uploadID string, partNumber int,
// server-side copying operations.
func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
if len(srcs) < 1 || len(srcs) > maxPartsCount {
- return ErrInvalidArgument("There must be as least one and upto 10000 source objects.")
+ return ErrInvalidArgument("There must be as least one and up to 10000 source objects.")
}
-
+ ctx := context.Background()
srcSizes := make([]int64, len(srcs))
var totalSize, size, totalParts int64
var srcUserMeta map[string]string
@@ -320,7 +417,7 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
for i, src := range srcs {
size, etag, srcUserMeta, err = src.getProps(c)
if err != nil {
- return fmt.Errorf("Could not get source props for %s/%s: %v", src.bucket, src.object, err)
+ return err
}
// Error out if client side encryption is used in this source object when
@@ -396,7 +493,7 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
}
// Send copy request
- resp, err := c.executeMethod("PUT", requestMetadata{
+ resp, err := c.executeMethod(ctx, "PUT", requestMetadata{
bucketName: dst.bucket,
objectName: dst.object,
customHeader: h,
@@ -426,13 +523,13 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
if len(userMeta) == 0 && len(srcs) == 1 {
metaMap = srcUserMeta
}
- metaHeaders := make(map[string][]string)
+ metaHeaders := make(map[string]string)
for k, v := range metaMap {
- metaHeaders[k] = append(metaHeaders[k], v)
+ metaHeaders[k] = v
}
- uploadID, err := c.newUploadID(dst.bucket, dst.object, metaHeaders)
+ uploadID, err := c.newUploadID(ctx, dst.bucket, dst.object, PutObjectOptions{UserMetadata: metaHeaders})
if err != nil {
- return fmt.Errorf("Error creating new upload: %v", err)
+ return err
}
// 2. Perform copy part uploads
@@ -457,10 +554,10 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
fmt.Sprintf("bytes=%d-%d", start, end))
// make upload-part-copy request
- complPart, err := c.uploadPartCopy(dst.bucket,
+ complPart, err := c.uploadPartCopy(ctx, dst.bucket,
dst.object, uploadID, partIndex, h)
if err != nil {
- return fmt.Errorf("Error in upload-part-copy - %v", err)
+ return err
}
objParts = append(objParts, complPart)
partIndex++
@@ -468,12 +565,12 @@ func (c Client) ComposeObject(dst DestinationInfo, srcs []SourceInfo) error {
}
// 3. Make final complete-multipart request.
- _, err = c.completeMultipartUpload(dst.bucket, dst.object, uploadID,
+ _, err = c.completeMultipartUpload(ctx, dst.bucket, dst.object, uploadID,
completeMultipartUpload{Parts: objParts})
if err != nil {
- err = fmt.Errorf("Error in complete-multipart request - %v", err)
+ return err
}
- return err
+ return nil
}
// partsRequired is ceiling(size / copyPartSize)
diff --git a/api-compose-object_test.go b/api-compose-object_test.go
index 5339d20..0f22a96 100644
--- a/api-compose-object_test.go
+++ b/api-compose-object_test.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/api-datatypes.go b/api-datatypes.go
index ab2aa4a..63fc089 100644
--- a/api-datatypes.go
+++ b/api-datatypes.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -43,7 +44,7 @@ type ObjectInfo struct {
// Collection of additional metadata on the object.
// eg: x-amz-meta-*, content-encoding etc.
- Metadata http.Header `json:"metadata"`
+ Metadata http.Header `json:"metadata" xml:"-"`
// Owner name.
Owner struct {
diff --git a/api-error-response.go b/api-error-response.go
index e0019a3..655991c 100644
--- a/api-error-response.go
+++ b/api-error-response.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016, 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -20,7 +21,6 @@ import (
"encoding/xml"
"fmt"
"net/http"
- "strconv"
)
/* **** SAMPLE ERROR RESPONSE ****
@@ -49,6 +49,9 @@ type ErrorResponse struct {
// only in HEAD bucket and ListObjects response.
Region string
+ // Underlying HTTP status code for the returned error
+ StatusCode int `xml:"-" json:"-"`
+
// Headers of the returned S3 XML error
Headers http.Header `xml:"-" json:"-"`
}
@@ -100,7 +103,10 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
msg := "Response is empty. " + reportIssue
return ErrInvalidArgument(msg)
}
- var errResp ErrorResponse
+
+ errResp := ErrorResponse{
+ StatusCode: resp.StatusCode,
+ }
err := xmlDecoder(resp.Body, &errResp)
// Xml decoding failed with no body, fall back to HTTP headers.
@@ -109,12 +115,14 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
case http.StatusNotFound:
if objectName == "" {
errResp = ErrorResponse{
+ StatusCode: resp.StatusCode,
Code: "NoSuchBucket",
Message: "The specified bucket does not exist.",
BucketName: bucketName,
}
} else {
errResp = ErrorResponse{
+ StatusCode: resp.StatusCode,
Code: "NoSuchKey",
Message: "The specified key does not exist.",
BucketName: bucketName,
@@ -123,6 +131,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
}
case http.StatusForbidden:
errResp = ErrorResponse{
+ StatusCode: resp.StatusCode,
Code: "AccessDenied",
Message: "Access Denied.",
BucketName: bucketName,
@@ -130,12 +139,14 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
}
case http.StatusConflict:
errResp = ErrorResponse{
+ StatusCode: resp.StatusCode,
Code: "Conflict",
Message: "Bucket not empty.",
BucketName: bucketName,
}
case http.StatusPreconditionFailed:
errResp = ErrorResponse{
+ StatusCode: resp.StatusCode,
Code: "PreconditionFailed",
Message: s3ErrorResponseMap["PreconditionFailed"],
BucketName: bucketName,
@@ -143,6 +154,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
}
default:
errResp = ErrorResponse{
+ StatusCode: resp.StatusCode,
Code: resp.Status,
Message: resp.Status,
BucketName: bucketName,
@@ -150,7 +162,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
}
}
- // Save hodID, requestID and region information
+ // Save hostID, requestID and region information
// from headers if not available through error XML.
if errResp.RequestID == "" {
errResp.RequestID = resp.Header.Get("x-amz-request-id")
@@ -162,7 +174,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
errResp.Region = resp.Header.Get("x-amz-bucket-region")
}
if errResp.Code == "InvalidRegion" && errResp.Region != "" {
- errResp.Message = fmt.Sprintf("Region does not match, expecting region '%s'.", errResp.Region)
+ errResp.Message = fmt.Sprintf("Region does not match, expecting region ‘%s’.", errResp.Region)
}
// Save headers returned in the API XML error
@@ -173,10 +185,10 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
// ErrTransferAccelerationBucket - bucket name is invalid to be used with transfer acceleration.
func ErrTransferAccelerationBucket(bucketName string) error {
- msg := fmt.Sprintf("The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods (\".\").")
return ErrorResponse{
+ StatusCode: http.StatusBadRequest,
Code: "InvalidArgument",
- Message: msg,
+ Message: "The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods ‘.’.",
BucketName: bucketName,
}
}
@@ -185,6 +197,7 @@ func ErrTransferAccelerationBucket(bucketName string) error {
func ErrEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error {
msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize)
return ErrorResponse{
+ StatusCode: http.StatusBadRequest,
Code: "EntityTooLarge",
Message: msg,
BucketName: bucketName,
@@ -194,9 +207,10 @@ func ErrEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName st
// ErrEntityTooSmall - Input size is smaller than supported minimum.
func ErrEntityTooSmall(totalSize int64, bucketName, objectName string) error {
- msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size '0B' for single PUT operation.", totalSize)
+ msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size ‘0B’ for single PUT operation.", totalSize)
return ErrorResponse{
- Code: "EntityTooLarge",
+ StatusCode: http.StatusBadRequest,
+ Code: "EntityTooSmall",
Message: msg,
BucketName: bucketName,
Key: objectName,
@@ -205,9 +219,9 @@ func ErrEntityTooSmall(totalSize int64, bucketName, objectName string) error {
// ErrUnexpectedEOF - Unexpected end of file reached.
func ErrUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string) error {
- msg := fmt.Sprintf("Data read ‘%s’ is not equal to the size ‘%s’ of the input Reader.",
- strconv.FormatInt(totalRead, 10), strconv.FormatInt(totalSize, 10))
+ msg := fmt.Sprintf("Data read ‘%d’ is not equal to the size ‘%d’ of the input Reader.", totalRead, totalSize)
return ErrorResponse{
+ StatusCode: http.StatusBadRequest,
Code: "UnexpectedEOF",
Message: msg,
BucketName: bucketName,
@@ -218,18 +232,20 @@ func ErrUnexpectedEOF(totalRead, totalSize int64, bucketName, objectName string)
// ErrInvalidBucketName - Invalid bucket name response.
func ErrInvalidBucketName(message string) error {
return ErrorResponse{
- Code: "InvalidBucketName",
- Message: message,
- RequestID: "minio",
+ StatusCode: http.StatusBadRequest,
+ Code: "InvalidBucketName",
+ Message: message,
+ RequestID: "minio",
}
}
// ErrInvalidObjectName - Invalid object name response.
func ErrInvalidObjectName(message string) error {
return ErrorResponse{
- Code: "NoSuchKey",
- Message: message,
- RequestID: "minio",
+ StatusCode: http.StatusNotFound,
+ Code: "NoSuchKey",
+ Message: message,
+ RequestID: "minio",
}
}
@@ -240,9 +256,10 @@ var ErrInvalidObjectPrefix = ErrInvalidObjectName
// ErrInvalidArgument - Invalid argument response.
func ErrInvalidArgument(message string) error {
return ErrorResponse{
- Code: "InvalidArgument",
- Message: message,
- RequestID: "minio",
+ StatusCode: http.StatusBadRequest,
+ Code: "InvalidArgument",
+ Message: message,
+ RequestID: "minio",
}
}
@@ -250,9 +267,10 @@ func ErrInvalidArgument(message string) error {
// The specified bucket does not have a bucket policy.
func ErrNoSuchBucketPolicy(message string) error {
return ErrorResponse{
- Code: "NoSuchBucketPolicy",
- Message: message,
- RequestID: "minio",
+ StatusCode: http.StatusNotFound,
+ Code: "NoSuchBucketPolicy",
+ Message: message,
+ RequestID: "minio",
}
}
@@ -260,8 +278,9 @@ func ErrNoSuchBucketPolicy(message string) error {
// The specified API call is not supported
func ErrAPINotSupported(message string) error {
return ErrorResponse{
- Code: "APINotSupported",
- Message: message,
- RequestID: "minio",
+ StatusCode: http.StatusNotImplemented,
+ Code: "APINotSupported",
+ Message: message,
+ RequestID: "minio",
}
}
diff --git a/api-error-response_test.go b/api-error-response_test.go
index 595cb50..bf10941 100644
--- a/api-error-response_test.go
+++ b/api-error-response_test.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -7,7 +8,7 @@
*
* http://www.apache.org/licenses/LICENSE-2.0
*
- * Unless required bZy applicable law or agreed to in writing, software
+ * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
@@ -32,20 +33,23 @@ func TestHttpRespToErrorResponse(t *testing.T) {
// 'genAPIErrorResponse' generates ErrorResponse for given APIError.
// provides a encodable populated response values.
genAPIErrorResponse := func(err APIError, bucketName string) ErrorResponse {
- var errResp = ErrorResponse{}
- errResp.Code = err.Code
- errResp.Message = err.Description
- errResp.BucketName = bucketName
- return errResp
+ return ErrorResponse{
+ Code: err.Code,
+ Message: err.Description,
+ BucketName: bucketName,
+ }
}
// Encodes the response headers into XML format.
- encodeErr := func(response interface{}) []byte {
- var bytesBuffer bytes.Buffer
- bytesBuffer.WriteString(xml.Header)
- encode := xml.NewEncoder(&bytesBuffer)
- encode.Encode(response)
- return bytesBuffer.Bytes()
+ encodeErr := func(response ErrorResponse) []byte {
+ buf := &bytes.Buffer{}
+ buf.WriteString(xml.Header)
+ encoder := xml.NewEncoder(buf)
+ err := encoder.Encode(response)
+ if err != nil {
+ t.Fatalf("error encoding response: %v", err)
+ }
+ return buf.Bytes()
}
// `createAPIErrorResponse` Mocks XML error response from the server.
@@ -65,6 +69,7 @@ func TestHttpRespToErrorResponse(t *testing.T) {
// 'genErrResponse' contructs error response based http Status Code
genErrResponse := func(resp *http.Response, code, message, bucketName, objectName string) ErrorResponse {
errResp := ErrorResponse{
+ StatusCode: resp.StatusCode,
Code: code,
Message: message,
BucketName: bucketName,
@@ -80,9 +85,10 @@ func TestHttpRespToErrorResponse(t *testing.T) {
// Generate invalid argument error.
genInvalidError := func(message string) error {
errResp := ErrorResponse{
- Code: "InvalidArgument",
- Message: message,
- RequestID: "minio",
+ StatusCode: http.StatusBadRequest,
+ Code: "InvalidArgument",
+ Message: message,
+ RequestID: "minio",
}
return errResp
}
@@ -101,22 +107,22 @@ func TestHttpRespToErrorResponse(t *testing.T) {
// Set the StatusCode to the argument supplied.
// Sets common headers.
genEmptyBodyResponse := func(statusCode int) *http.Response {
- resp := &http.Response{}
- // set empty response body.
- resp.Body = ioutil.NopCloser(bytes.NewBuffer([]byte("")))
- // set headers.
+ resp := &http.Response{
+ StatusCode: statusCode,
+ Body: ioutil.NopCloser(bytes.NewReader(nil)),
+ }
setCommonHeaders(resp)
- // set status code.
- resp.StatusCode = statusCode
return resp
}
// Decode XML error message from the http response body.
- decodeXMLError := func(resp *http.Response, t *testing.T) error {
- var errResp ErrorResponse
+ decodeXMLError := func(resp *http.Response) error {
+ errResp := ErrorResponse{
+ StatusCode: resp.StatusCode,
+ }
err := xmlDecoder(resp.Body, &errResp)
if err != nil {
- t.Fatal("XML decoding of response body failed")
+ t.Fatalf("XML decoding of response body failed: %v", err)
}
return errResp
}
@@ -134,12 +140,12 @@ func TestHttpRespToErrorResponse(t *testing.T) {
// Used for asserting the actual response.
expectedErrResponse := []error{
genInvalidError("Response is empty. " + "Please report this issue at https://github.com/minio/minio-go/issues."),
- decodeXMLError(createAPIErrorResponse(APIErrors[0], "minio-bucket"), t),
- genErrResponse(setCommonHeaders(&http.Response{}), "NoSuchBucket", "The specified bucket does not exist.", "minio-bucket", ""),
- genErrResponse(setCommonHeaders(&http.Response{}), "NoSuchKey", "The specified key does not exist.", "minio-bucket", "Asia/"),
- genErrResponse(setCommonHeaders(&http.Response{}), "AccessDenied", "Access Denied.", "minio-bucket", ""),
- genErrResponse(setCommonHeaders(&http.Response{}), "Conflict", "Bucket not empty.", "minio-bucket", ""),
- genErrResponse(setCommonHeaders(&http.Response{}), "Bad Request", "Bad Request", "minio-bucket", ""),
+ decodeXMLError(createAPIErrorResponse(APIErrors[0], "minio-bucket")),
+ genErrResponse(setCommonHeaders(&http.Response{StatusCode: http.StatusNotFound}), "NoSuchBucket", "The specified bucket does not exist.", "minio-bucket", ""),
+ genErrResponse(setCommonHeaders(&http.Response{StatusCode: http.StatusNotFound}), "NoSuchKey", "The specified key does not exist.", "minio-bucket", "Asia/"),
+ genErrResponse(setCommonHeaders(&http.Response{StatusCode: http.StatusForbidden}), "AccessDenied", "Access Denied.", "minio-bucket", ""),
+ genErrResponse(setCommonHeaders(&http.Response{StatusCode: http.StatusConflict}), "Conflict", "Bucket not empty.", "minio-bucket", ""),
+ genErrResponse(setCommonHeaders(&http.Response{StatusCode: http.StatusBadRequest}), "Bad Request", "Bad Request", "minio-bucket", ""),
}
// List of http response to be used as input.
@@ -182,6 +188,7 @@ func TestHttpRespToErrorResponse(t *testing.T) {
func TestErrEntityTooLarge(t *testing.T) {
msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", 1000000, 99999)
expectedResult := ErrorResponse{
+ StatusCode: http.StatusBadRequest,
Code: "EntityTooLarge",
Message: msg,
BucketName: "minio-bucket",
@@ -189,22 +196,23 @@ func TestErrEntityTooLarge(t *testing.T) {
}
actualResult := ErrEntityTooLarge(1000000, 99999, "minio-bucket", "Asia/")
if !reflect.DeepEqual(expectedResult, actualResult) {
- t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
+ t.Errorf("Expected result to be '%#v', but instead got '%#v'", expectedResult, actualResult)
}
}
// Test validates 'ErrEntityTooSmall' error response.
func TestErrEntityTooSmall(t *testing.T) {
- msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size '0B' for single PUT operation.", -1)
+ msg := fmt.Sprintf("Your proposed upload size ‘%d’ is below the minimum allowed object size ‘0B’ for single PUT operation.", -1)
expectedResult := ErrorResponse{
- Code: "EntityTooLarge",
+ StatusCode: http.StatusBadRequest,
+ Code: "EntityTooSmall",
Message: msg,
BucketName: "minio-bucket",
Key: "Asia/",
}
actualResult := ErrEntityTooSmall(-1, "minio-bucket", "Asia/")
if !reflect.DeepEqual(expectedResult, actualResult) {
- t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
+ t.Errorf("Expected result to be '%#v', but instead got '%#v'", expectedResult, actualResult)
}
}
@@ -213,6 +221,7 @@ func TestErrUnexpectedEOF(t *testing.T) {
msg := fmt.Sprintf("Data read ‘%s’ is not equal to the size ‘%s’ of the input Reader.",
strconv.FormatInt(100, 10), strconv.FormatInt(101, 10))
expectedResult := ErrorResponse{
+ StatusCode: http.StatusBadRequest,
Code: "UnexpectedEOF",
Message: msg,
BucketName: "minio-bucket",
@@ -220,46 +229,49 @@ func TestErrUnexpectedEOF(t *testing.T) {
}
actualResult := ErrUnexpectedEOF(100, 101, "minio-bucket", "Asia/")
if !reflect.DeepEqual(expectedResult, actualResult) {
- t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
+ t.Errorf("Expected result to be '%#v', but instead got '%#v'", expectedResult, actualResult)
}
}
// Test validates 'ErrInvalidBucketName' error response.
func TestErrInvalidBucketName(t *testing.T) {
expectedResult := ErrorResponse{
- Code: "InvalidBucketName",
- Message: "Invalid Bucket name",
- RequestID: "minio",
+ StatusCode: http.StatusBadRequest,
+ Code: "InvalidBucketName",
+ Message: "Invalid Bucket name",
+ RequestID: "minio",
}
actualResult := ErrInvalidBucketName("Invalid Bucket name")
if !reflect.DeepEqual(expectedResult, actualResult) {
- t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
+ t.Errorf("Expected result to be '%#v', but instead got '%#v'", expectedResult, actualResult)
}
}
// Test validates 'ErrInvalidObjectName' error response.
func TestErrInvalidObjectName(t *testing.T) {
expectedResult := ErrorResponse{
- Code: "NoSuchKey",
- Message: "Invalid Object Key",
- RequestID: "minio",
+ StatusCode: http.StatusNotFound,
+ Code: "NoSuchKey",
+ Message: "Invalid Object Key",
+ RequestID: "minio",
}
actualResult := ErrInvalidObjectName("Invalid Object Key")
if !reflect.DeepEqual(expectedResult, actualResult) {
- t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
+ t.Errorf("Expected result to be '%#v', but instead got '%#v'", expectedResult, actualResult)
}
}
// Test validates 'ErrInvalidArgument' response.
func TestErrInvalidArgument(t *testing.T) {
expectedResult := ErrorResponse{
- Code: "InvalidArgument",
- Message: "Invalid Argument",
- RequestID: "minio",
+ StatusCode: http.StatusBadRequest,
+ Code: "InvalidArgument",
+ Message: "Invalid Argument",
+ RequestID: "minio",
}
actualResult := ErrInvalidArgument("Invalid Argument")
if !reflect.DeepEqual(expectedResult, actualResult) {
- t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
+ t.Errorf("Expected result to be '%#v', but instead got '%#v'", expectedResult, actualResult)
}
}
diff --git a/api-get-object-context.go b/api-get-object-context.go
new file mode 100644
index 0000000..f8dfac7
--- /dev/null
+++ b/api-get-object-context.go
@@ -0,0 +1,26 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import "context"
+
+// GetObjectWithContext - returns an seekable, readable object.
+// The options can be used to specify the GET request further.
+func (c Client) GetObjectWithContext(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) {
+ return c.getObjectWithContext(ctx, bucketName, objectName, opts)
+}
diff --git a/api-get-object-file.go b/api-get-object-file.go
index c4193e9..2b58220 100644
--- a/api-get-object-file.go
+++ b/api-get-object-file.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -21,11 +22,34 @@ import (
"os"
"path/filepath"
+ "github.com/minio/minio-go/pkg/encrypt"
+
+ "context"
+
"github.com/minio/minio-go/pkg/s3utils"
)
+// FGetObjectWithContext - download contents of an object to a local file.
+// The options can be used to specify the GET request further.
+func (c Client) FGetObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error {
+ return c.fGetObjectWithContext(ctx, bucketName, objectName, filePath, opts)
+}
+
// FGetObject - download contents of an object to a local file.
-func (c Client) FGetObject(bucketName, objectName, filePath string) error {
+func (c Client) FGetObject(bucketName, objectName, filePath string, opts GetObjectOptions) error {
+ return c.fGetObjectWithContext(context.Background(), bucketName, objectName, filePath, opts)
+}
+
+// FGetEncryptedObject - Decrypt and store an object at filePath.
+func (c Client) FGetEncryptedObject(bucketName, objectName, filePath string, materials encrypt.Materials) error {
+ if materials == nil {
+ return ErrInvalidArgument("Unable to recognize empty encryption properties")
+ }
+ return c.FGetObject(bucketName, objectName, filePath, GetObjectOptions{Materials: materials})
+}
+
+// fGetObjectWithContext - fgetObject wrapper function with context
+func (c Client) fGetObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
@@ -60,7 +84,7 @@ func (c Client) FGetObject(bucketName, objectName, filePath string) error {
}
// Gather md5sum.
- objectStat, err := c.StatObject(bucketName, objectName)
+ objectStat, err := c.StatObject(bucketName, objectName, StatObjectOptions{opts})
if err != nil {
return err
}
@@ -82,13 +106,12 @@ func (c Client) FGetObject(bucketName, objectName, filePath string) error {
// Initialize get object request headers to set the
// appropriate range offsets to read from.
- reqHeaders := NewGetReqHeaders()
if st.Size() > 0 {
- reqHeaders.SetRange(st.Size(), 0)
+ opts.SetRange(st.Size(), 0)
}
// Seek to current position for incoming reader.
- objectReader, objectStat, err := c.getObject(bucketName, objectName, reqHeaders)
+ objectReader, objectStat, err := c.getObject(ctx, bucketName, objectName, opts)
if err != nil {
return err
}
diff --git a/api-get-object.go b/api-get-object.go
index 1078d2f..50bbc22 100644
--- a/api-get-object.go
+++ b/api-get-object.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016, 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,6 +18,7 @@
package minio
import (
+ "context"
"errors"
"fmt"
"io"
@@ -36,27 +38,16 @@ func (c Client) GetEncryptedObject(bucketName, objectName string, encryptMateria
return nil, ErrInvalidArgument("Unable to recognize empty encryption properties")
}
- // Fetch encrypted object
- encReader, err := c.GetObject(bucketName, objectName)
- if err != nil {
- return nil, err
- }
- // Stat object to get its encryption metadata
- st, err := encReader.Stat()
- if err != nil {
- return nil, err
- }
-
- // Setup object for decrytion, object is transparently
- // decrypted as the consumer starts reading.
- encryptMaterials.SetupDecryptMode(encReader, st.Metadata.Get(amzHeaderIV), st.Metadata.Get(amzHeaderKey))
-
- // Success.
- return encryptMaterials, nil
+ return c.GetObject(bucketName, objectName, GetObjectOptions{Materials: encryptMaterials})
}
// GetObject - returns an seekable, readable object.
-func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
+func (c Client) GetObject(bucketName, objectName string, opts GetObjectOptions) (*Object, error) {
+ return c.getObjectWithContext(context.Background(), bucketName, objectName, opts)
+}
+
+// GetObject wrapper function that accepts a request context
+func (c Client) getObjectWithContext(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return nil, err
@@ -102,34 +93,26 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
if req.isFirstReq {
// First request is a Read/ReadAt.
if req.isReadOp {
- reqHeaders := NewGetReqHeaders()
// Differentiate between wanting the whole object and just a range.
if req.isReadAt {
// If this is a ReadAt request only get the specified range.
// Range is set with respect to the offset and length of the buffer requested.
// Do not set objectInfo from the first readAt request because it will not get
// the whole object.
- reqHeaders.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1)
- httpReader, objectInfo, err = c.getObject(bucketName, objectName, reqHeaders)
- } else {
- if req.Offset > 0 {
- reqHeaders.SetRange(req.Offset, 0)
- }
-
- // First request is a Read request.
- httpReader, objectInfo, err = c.getObject(bucketName, objectName, reqHeaders)
+ opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1)
+ } else if req.Offset > 0 {
+ opts.SetRange(req.Offset, 0)
}
+ httpReader, objectInfo, err = c.getObject(ctx, bucketName, objectName, opts)
if err != nil {
- resCh <- getResponse{
- Error: err,
- }
+ resCh <- getResponse{Error: err}
return
}
etag = objectInfo.ETag
// Read at least firstReq.Buffer bytes, if not we have
// reached our EOF.
size, err := io.ReadFull(httpReader, req.Buffer)
- if err == io.ErrUnexpectedEOF {
+ if size > 0 && err == io.ErrUnexpectedEOF {
// If an EOF happens after reading some but not
// all the bytes ReadFull returns ErrUnexpectedEOF
err = io.EOF
@@ -144,7 +127,7 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
} else {
// First request is a Stat or Seek call.
// Only need to run a StatObject until an actual Read or ReadAt request comes through.
- objectInfo, err = c.StatObject(bucketName, objectName)
+ objectInfo, err = c.statObject(ctx, bucketName, objectName, StatObjectOptions{opts})
if err != nil {
resCh <- getResponse{
Error: err,
@@ -159,11 +142,10 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
}
}
} else if req.settingObjectInfo { // Request is just to get objectInfo.
- reqHeaders := NewGetReqHeaders()
if etag != "" {
- reqHeaders.SetMatchETag(etag)
+ opts.SetMatchETag(etag)
}
- objectInfo, err := c.statObject(bucketName, objectName, reqHeaders)
+ objectInfo, err := c.statObject(ctx, bucketName, objectName, StatObjectOptions{opts})
if err != nil {
resCh <- getResponse{
Error: err,
@@ -183,9 +165,8 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
// new ones when they haven't been already.
// All readAt requests are new requests.
if req.DidOffsetChange || !req.beenRead {
- reqHeaders := NewGetReqHeaders()
if etag != "" {
- reqHeaders.SetMatchETag(etag)
+ opts.SetMatchETag(etag)
}
if httpReader != nil {
// Close previously opened http reader.
@@ -194,16 +175,11 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
// If this request is a readAt only get the specified range.
if req.isReadAt {
// Range is set with respect to the offset and length of the buffer requested.
- reqHeaders.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1)
- httpReader, _, err = c.getObject(bucketName, objectName, reqHeaders)
- } else {
- // Range is set with respect to the offset.
- if req.Offset > 0 {
- reqHeaders.SetRange(req.Offset, 0)
- }
-
- httpReader, objectInfo, err = c.getObject(bucketName, objectName, reqHeaders)
+ opts.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1)
+ } else if req.Offset > 0 { // Range is set with respect to the offset.
+ opts.SetRange(req.Offset, 0)
}
+ httpReader, objectInfo, err = c.getObject(ctx, bucketName, objectName, opts)
if err != nil {
resCh <- getResponse{
Error: err,
@@ -626,7 +602,7 @@ func newObject(reqCh chan<- getRequest, resCh <-chan getResponse, doneCh chan<-
//
// For more information about the HTTP Range header.
// go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
-func (c Client) getObject(bucketName, objectName string, reqHeaders RequestHeaders) (io.ReadCloser, ObjectInfo, error) {
+func (c Client) getObject(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, error) {
// Validate input arguments.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return nil, ObjectInfo{}, err
@@ -635,18 +611,12 @@ func (c Client) getObject(bucketName, objectName string, reqHeaders RequestHeade
return nil, ObjectInfo{}, err
}
- // Set all the necessary reqHeaders.
- customHeader := make(http.Header)
- for key, value := range reqHeaders.Header {
- customHeader[key] = value
- }
-
// Execute GET on objectName.
- resp, err := c.executeMethod("GET", requestMetadata{
- bucketName: bucketName,
- objectName: objectName,
- customHeader: customHeader,
- contentSHA256Bytes: emptySHA256,
+ resp, err := c.executeMethod(ctx, "GET", requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ customHeader: opts.Header(),
+ contentSHA256Hex: emptySHA256Hex,
})
if err != nil {
return nil, ObjectInfo{}, err
@@ -679,13 +649,28 @@ func (c Client) getObject(bucketName, objectName string, reqHeaders RequestHeade
if contentType == "" {
contentType = "application/octet-stream"
}
- var objectStat ObjectInfo
- objectStat.ETag = md5sum
- objectStat.Key = objectName
- objectStat.Size = resp.ContentLength
- objectStat.LastModified = date
- objectStat.ContentType = contentType
+
+ objectStat := ObjectInfo{
+ ETag: md5sum,
+ Key: objectName,
+ Size: resp.ContentLength,
+ LastModified: date,
+ ContentType: contentType,
+ // Extract only the relevant header keys describing the object.
+ // following function filters out a list of standard set of keys
+ // which are not part of object metadata.
+ Metadata: extractObjMetadata(resp.Header),
+ }
+
+ reader := resp.Body
+ if opts.Materials != nil {
+ err = opts.Materials.SetupDecryptMode(reader, objectStat.Metadata.Get(amzHeaderIV), objectStat.Metadata.Get(amzHeaderKey))
+ if err != nil {
+ return nil, ObjectInfo{}, err
+ }
+ reader = opts.Materials
+ }
// do not close body here, caller will close
- return resp.Body, objectStat, nil
+ return reader, objectStat, nil
}
diff --git a/request-headers.go b/api-get-options.go
index 76c8720..dd70415 100644
--- a/request-headers.go
+++ b/api-get-options.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016-17 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -20,80 +21,94 @@ import (
"fmt"
"net/http"
"time"
+
+ "github.com/minio/minio-go/pkg/encrypt"
)
-// RequestHeaders - implement methods for setting special
-// request headers for GET, HEAD object operations.
-// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
-type RequestHeaders struct {
- http.Header
+// GetObjectOptions are used to specify additional headers or options
+// during GET requests.
+type GetObjectOptions struct {
+ headers map[string]string
+
+ Materials encrypt.Materials
+}
+
+// StatObjectOptions are used to specify additional headers or options
+// during GET info/stat requests.
+type StatObjectOptions struct {
+ GetObjectOptions
}
-// NewGetReqHeaders - initializes a new request headers for GET request.
-func NewGetReqHeaders() RequestHeaders {
- return RequestHeaders{
- Header: make(http.Header),
+// Header returns the http.Header representation of the GET options.
+func (o GetObjectOptions) Header() http.Header {
+ headers := make(http.Header, len(o.headers))
+ for k, v := range o.headers {
+ headers.Set(k, v)
}
+ return headers
}
-// NewHeadReqHeaders - initializes a new request headers for HEAD request.
-func NewHeadReqHeaders() RequestHeaders {
- return RequestHeaders{
- Header: make(http.Header),
+// Set adds a key value pair to the options. The
+// key-value pair will be part of the HTTP GET request
+// headers.
+func (o *GetObjectOptions) Set(key, value string) {
+ if o.headers == nil {
+ o.headers = make(map[string]string)
}
+ o.headers[http.CanonicalHeaderKey(key)] = value
}
// SetMatchETag - set match etag.
-func (c RequestHeaders) SetMatchETag(etag string) error {
+func (o *GetObjectOptions) SetMatchETag(etag string) error {
if etag == "" {
return ErrInvalidArgument("ETag cannot be empty.")
}
- c.Set("If-Match", "\""+etag+"\"")
+ o.Set("If-Match", "\""+etag+"\"")
return nil
}
// SetMatchETagExcept - set match etag except.
-func (c RequestHeaders) SetMatchETagExcept(etag string) error {
+func (o *GetObjectOptions) SetMatchETagExcept(etag string) error {
if etag == "" {
return ErrInvalidArgument("ETag cannot be empty.")
}
- c.Set("If-None-Match", "\""+etag+"\"")
+ o.Set("If-None-Match", "\""+etag+"\"")
return nil
}
// SetUnmodified - set unmodified time since.
-func (c RequestHeaders) SetUnmodified(modTime time.Time) error {
+func (o *GetObjectOptions) SetUnmodified(modTime time.Time) error {
if modTime.IsZero() {
return ErrInvalidArgument("Modified since cannot be empty.")
}
- c.Set("If-Unmodified-Since", modTime.Format(http.TimeFormat))
+ o.Set("If-Unmodified-Since", modTime.Format(http.TimeFormat))
return nil
}
// SetModified - set modified time since.
-func (c RequestHeaders) SetModified(modTime time.Time) error {
+func (o *GetObjectOptions) SetModified(modTime time.Time) error {
if modTime.IsZero() {
return ErrInvalidArgument("Modified since cannot be empty.")
}
- c.Set("If-Modified-Since", modTime.Format(http.TimeFormat))
+ o.Set("If-Modified-Since", modTime.Format(http.TimeFormat))
return nil
}
// SetRange - set the start and end offset of the object to be read.
// See https://tools.ietf.org/html/rfc7233#section-3.1 for reference.
-func (c RequestHeaders) SetRange(start, end int64) error {
+func (o *GetObjectOptions) SetRange(start, end int64) error {
switch {
case start == 0 && end < 0:
// Read last '-end' bytes. `bytes=-N`.
- c.Set("Range", fmt.Sprintf("bytes=%d", end))
+ o.Set("Range", fmt.Sprintf("bytes=%d", end))
case 0 < start && end == 0:
// Read everything starting from offset
// 'start'. `bytes=N-`.
- c.Set("Range", fmt.Sprintf("bytes=%d-", start))
+ o.Set("Range", fmt.Sprintf("bytes=%d-", start))
case 0 <= start && start <= end:
// Read everything starting at 'start' till the
// 'end'. `bytes=N-M`
- c.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end))
+ o.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end))
default:
// All other cases such as
// bytes=-3-
diff --git a/api-get-policy.go b/api-get-policy.go
index 10ccdc6..a4259c9 100644
--- a/api-get-policy.go
+++ b/api-get-policy.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,6 +18,7 @@
package minio
import (
+ "context"
"encoding/json"
"io/ioutil"
"net/http"
@@ -79,10 +81,10 @@ func (c Client) getBucketPolicy(bucketName string) (policy.BucketAccessPolicy, e
urlValues.Set("policy", "")
// Execute GET on bucket to list objects.
- resp, err := c.executeMethod("GET", requestMetadata{
- bucketName: bucketName,
- queryValues: urlValues,
- contentSHA256Bytes: emptySHA256,
+ resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
})
defer closeResponse(resp)
diff --git a/api-list.go b/api-list.go
index 6de1fe9..3cfb47d 100644
--- a/api-list.go
+++ b/api-list.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,6 +18,7 @@
package minio
import (
+ "context"
"errors"
"fmt"
"net/http"
@@ -38,7 +40,7 @@ import (
//
func (c Client) ListBuckets() ([]BucketInfo, error) {
// Execute GET on service.
- resp, err := c.executeMethod("GET", requestMetadata{contentSHA256Bytes: emptySHA256})
+ resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{contentSHA256Hex: emptySHA256Hex})
defer closeResponse(resp)
if err != nil {
return nil, err
@@ -215,10 +217,10 @@ func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken s
urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
// Execute GET on bucket to list objects.
- resp, err := c.executeMethod("GET", requestMetadata{
- bucketName: bucketName,
- queryValues: urlValues,
- contentSHA256Bytes: emptySHA256,
+ resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
})
defer closeResponse(resp)
if err != nil {
@@ -393,10 +395,10 @@ func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimit
urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys))
// Execute GET on bucket to list objects.
- resp, err := c.executeMethod("GET", requestMetadata{
- bucketName: bucketName,
- queryValues: urlValues,
- contentSHA256Bytes: emptySHA256,
+ resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
})
defer closeResponse(resp)
if err != nil {
@@ -572,10 +574,10 @@ func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker,
urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads))
// Execute GET on bucketName to list multipart uploads.
- resp, err := c.executeMethod("GET", requestMetadata{
- bucketName: bucketName,
- queryValues: urlValues,
- contentSHA256Bytes: emptySHA256,
+ resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
})
defer closeResponse(resp)
if err != nil {
@@ -690,11 +692,11 @@ func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, pa
urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts))
// Execute GET on objectName to get list of parts.
- resp, err := c.executeMethod("GET", requestMetadata{
- bucketName: bucketName,
- objectName: objectName,
- queryValues: urlValues,
- contentSHA256Bytes: emptySHA256,
+ resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
})
defer closeResponse(resp)
if err != nil {
diff --git a/api-notification.go b/api-notification.go
index 25a283a..3f5b30a 100644
--- a/api-notification.go
+++ b/api-notification.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,6 +19,7 @@ package minio
import (
"bufio"
+ "context"
"encoding/json"
"io"
"net/http"
@@ -46,10 +48,10 @@ func (c Client) getBucketNotification(bucketName string) (BucketNotification, er
urlValues.Set("notification", "")
// Execute GET on bucket to list objects.
- resp, err := c.executeMethod("GET", requestMetadata{
- bucketName: bucketName,
- queryValues: urlValues,
- contentSHA256Bytes: emptySHA256,
+ resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
})
defer closeResponse(resp)
@@ -150,7 +152,7 @@ func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, even
// Check ARN partition to verify if listening bucket is supported
if s3utils.IsAmazonEndpoint(c.endpointURL) || s3utils.IsGoogleEndpoint(c.endpointURL) {
notificationInfoCh <- NotificationInfo{
- Err: ErrAPINotSupported("Listening bucket notification is specific only to `minio` partitions"),
+ Err: ErrAPINotSupported("Listening for bucket notification is specific only to `minio` server endpoints"),
}
return
}
@@ -170,13 +172,16 @@ func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, even
urlValues["events"] = events
// Execute GET on bucket to list objects.
- resp, err := c.executeMethod("GET", requestMetadata{
- bucketName: bucketName,
- queryValues: urlValues,
- contentSHA256Bytes: emptySHA256,
+ resp, err := c.executeMethod(context.Background(), "GET", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
})
if err != nil {
- continue
+ notificationInfoCh <- NotificationInfo{
+ Err: err,
+ }
+ return
}
// Validate http response, upon error return quickly.
diff --git a/api-presigned.go b/api-presigned.go
index 8cfcb55..123ad44 100644
--- a/api-presigned.go
+++ b/api-presigned.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,6 +19,7 @@ package minio
import (
"errors"
+ "net/http"
"net/url"
"time"
@@ -25,16 +27,6 @@ import (
"github.com/minio/minio-go/pkg/s3utils"
)
-// supportedGetReqParams - supported request parameters for GET presigned request.
-var supportedGetReqParams = map[string]struct{}{
- "response-expires": {},
- "response-content-type": {},
- "response-cache-control": {},
- "response-content-language": {},
- "response-content-encoding": {},
- "response-content-disposition": {},
-}
-
// presignURL - Returns a presigned URL for an input 'method'.
// Expires maximum is 7days - ie. 604800 and minimum is 1.
func (c Client) presignURL(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
@@ -42,61 +34,71 @@ func (c Client) presignURL(method string, bucketName string, objectName string,
if method == "" {
return nil, ErrInvalidArgument("method cannot be empty.")
}
- if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ if err = s3utils.CheckValidBucketName(bucketName); err != nil {
return nil, err
}
- if err := s3utils.CheckValidObjectName(objectName); err != nil {
- return nil, err
- }
- if err := isValidExpiry(expires); err != nil {
+ if err = isValidExpiry(expires); err != nil {
return nil, err
}
// Convert expires into seconds.
expireSeconds := int64(expires / time.Second)
reqMetadata := requestMetadata{
- presignURL: true,
- bucketName: bucketName,
- objectName: objectName,
- expires: expireSeconds,
- }
-
- // For "GET" we are handling additional request parameters to
- // override its response headers.
- if method == "GET" {
- // Verify if input map has unsupported params, if yes exit.
- for k := range reqParams {
- if _, ok := supportedGetReqParams[k]; !ok {
- return nil, ErrInvalidArgument(k + " unsupported request parameter for presigned GET.")
- }
- }
- // Save the request parameters to be used in presigning for GET request.
- reqMetadata.queryValues = reqParams
+ presignURL: true,
+ bucketName: bucketName,
+ objectName: objectName,
+ expires: expireSeconds,
+ queryValues: reqParams,
}
// Instantiate a new request.
// Since expires is set newRequest will presign the request.
- req, err := c.newRequest(method, reqMetadata)
- if err != nil {
+ var req *http.Request
+ if req, err = c.newRequest(method, reqMetadata); err != nil {
return nil, err
}
return req.URL, nil
}
// PresignedGetObject - Returns a presigned URL to access an object
-// without credentials. Expires maximum is 7days - ie. 604800 and
-// minimum is 1. Additionally you can override a set of response
-// headers using the query parameters.
+// data without credentials. URL can have a maximum expiry of
+// upto 7days or a minimum of 1sec. Additionally you can override
+// a set of response headers using the query parameters.
func (c Client) PresignedGetObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+ if err = s3utils.CheckValidObjectName(objectName); err != nil {
+ return nil, err
+ }
return c.presignURL("GET", bucketName, objectName, expires, reqParams)
}
-// PresignedPutObject - Returns a presigned URL to upload an object without credentials.
-// Expires maximum is 7days - ie. 604800 and minimum is 1.
+// PresignedHeadObject - Returns a presigned URL to access object
+// metadata without credentials. URL can have a maximum expiry of
+// upto 7days or a minimum of 1sec. Additionally you can override
+// a set of response headers using the query parameters.
+func (c Client) PresignedHeadObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+ if err = s3utils.CheckValidObjectName(objectName); err != nil {
+ return nil, err
+ }
+ return c.presignURL("HEAD", bucketName, objectName, expires, reqParams)
+}
+
+// PresignedPutObject - Returns a presigned URL to upload an object
+// without credentials. URL can have a maximum expiry of upto 7days
+// or a minimum of 1sec.
func (c Client) PresignedPutObject(bucketName string, objectName string, expires time.Duration) (u *url.URL, err error) {
+ if err = s3utils.CheckValidObjectName(objectName); err != nil {
+ return nil, err
+ }
return c.presignURL("PUT", bucketName, objectName, expires, nil)
}
+// Presign - returns a presigned URL for any http method of your choice
+// along with custom request params. URL can have a maximum expiry of
+// upto 7days or a minimum of 1sec.
+func (c Client) Presign(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) {
+ return c.presignURL(method, bucketName, objectName, expires, reqParams)
+}
+
// PresignedPostPolicy - Returns POST urlString, form data to upload an object.
func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[string]string, err error) {
// Validate input arguments.
diff --git a/api-put-bucket.go b/api-put-bucket.go
index fd37dc1..bb583a7 100644
--- a/api-put-bucket.go
+++ b/api-put-bucket.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2015, 2016, 2017 Minio, Inc.
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,6 +19,7 @@ package minio
import (
"bytes"
+ "context"
"encoding/json"
"encoding/xml"
"fmt"
@@ -75,14 +76,14 @@ func (c Client) MakeBucket(bucketName string, location string) (err error) {
if err != nil {
return err
}
- reqMetadata.contentMD5Bytes = sumMD5(createBucketConfigBytes)
- reqMetadata.contentSHA256Bytes = sum256(createBucketConfigBytes)
+ reqMetadata.contentMD5Base64 = sumMD5Base64(createBucketConfigBytes)
+ reqMetadata.contentSHA256Hex = sum256Hex(createBucketConfigBytes)
reqMetadata.contentBody = bytes.NewReader(createBucketConfigBytes)
reqMetadata.contentLength = int64(len(createBucketConfigBytes))
}
// Execute PUT to create a new bucket.
- resp, err := c.executeMethod("PUT", reqMetadata)
+ resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata)
defer closeResponse(resp)
if err != nil {
return err
@@ -161,16 +162,16 @@ func (c Client) putBucketPolicy(bucketName string, policyInfo policy.BucketAcces
policyBuffer := bytes.NewReader(policyBytes)
reqMetadata := requestMetadata{
- bucketName: bucketName,
- queryValues: urlValues,
- contentBody: policyBuffer,
- contentLength: int64(len(policyBytes)),
- contentMD5Bytes: sumMD5(policyBytes),
- contentSHA256Bytes: sum256(policyBytes),
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentBody: policyBuffer,
+ contentLength: int64(len(policyBytes)),
+ contentMD5Base64: sumMD5Base64(policyBytes),
+ contentSHA256Hex: sum256Hex(policyBytes),
}
// Execute PUT to upload a new bucket policy.
- resp, err := c.executeMethod("PUT", reqMetadata)
+ resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata)
defer closeResponse(resp)
if err != nil {
return err
@@ -195,10 +196,10 @@ func (c Client) removeBucketPolicy(bucketName string) error {
urlValues.Set("policy", "")
// Execute DELETE on objectName.
- resp, err := c.executeMethod("DELETE", requestMetadata{
- bucketName: bucketName,
- queryValues: urlValues,
- contentSHA256Bytes: emptySHA256,
+ resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
})
defer closeResponse(resp)
if err != nil {
@@ -226,16 +227,16 @@ func (c Client) SetBucketNotification(bucketName string, bucketNotification Buck
notifBuffer := bytes.NewReader(notifBytes)
reqMetadata := requestMetadata{
- bucketName: bucketName,
- queryValues: urlValues,
- contentBody: notifBuffer,
- contentLength: int64(len(notifBytes)),
- contentMD5Bytes: sumMD5(notifBytes),
- contentSHA256Bytes: sum256(notifBytes),
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentBody: notifBuffer,
+ contentLength: int64(len(notifBytes)),
+ contentMD5Base64: sumMD5Base64(notifBytes),
+ contentSHA256Hex: sum256Hex(notifBytes),
}
// Execute PUT to upload a new bucket notification.
- resp, err := c.executeMethod("PUT", reqMetadata)
+ resp, err := c.executeMethod(context.Background(), "PUT", reqMetadata)
defer closeResponse(resp)
if err != nil {
return err
diff --git a/api-put-object-common.go b/api-put-object-common.go
index 833f1fe..c16c3c6 100644
--- a/api-put-object-common.go
+++ b/api-put-object-common.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,7 +18,7 @@
package minio
import (
- "hash"
+ "context"
"io"
"math"
"os"
@@ -25,12 +26,6 @@ import (
"github.com/minio/minio-go/pkg/s3utils"
)
-// Verify if reader is *os.File
-func isFile(reader io.Reader) (ok bool) {
- _, ok = reader.(*os.File)
- return
-}
-
// Verify if reader is *minio.Object
func isObject(reader io.Reader) (ok bool) {
_, ok = reader.(*Object)
@@ -40,6 +35,26 @@ func isObject(reader io.Reader) (ok bool) {
// Verify if reader is a generic ReaderAt
func isReadAt(reader io.Reader) (ok bool) {
_, ok = reader.(io.ReaderAt)
+ if ok {
+ var v *os.File
+ v, ok = reader.(*os.File)
+ if ok {
+ // Stdin, Stdout and Stderr all have *os.File type
+ // which happen to also be io.ReaderAt compatible
+ // we need to add special conditions for them to
+ // be ignored by this function.
+ for _, f := range []string{
+ "/dev/stdin",
+ "/dev/stdout",
+ "/dev/stderr",
+ } {
+ if f == v.Name() {
+ ok = false
+ break
+ }
+ }
+ }
+ }
return
}
@@ -76,31 +91,9 @@ func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, las
return totalPartsCount, partSize, lastPartSize, nil
}
-// hashCopyN - Calculates chosen hashes up to partSize amount of bytes.
-func hashCopyN(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, writer io.Writer, reader io.Reader, partSize int64) (size int64, err error) {
- hashWriter := writer
- for _, v := range hashAlgorithms {
- hashWriter = io.MultiWriter(hashWriter, v)
- }
-
- // Copies to input at writer.
- size, err = io.CopyN(hashWriter, reader, partSize)
- if err != nil {
- // If not EOF return error right here.
- if err != io.EOF {
- return 0, err
- }
- }
-
- for k, v := range hashAlgorithms {
- hashSums[k] = v.Sum(nil)
- }
- return size, err
-}
-
// getUploadID - fetch upload id if already present for an object name
// or initiate a new request to fetch a new upload id.
-func (c Client) newUploadID(bucketName, objectName string, metaData map[string][]string) (uploadID string, err error) {
+func (c Client) newUploadID(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (uploadID string, err error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return "", err
@@ -110,7 +103,7 @@ func (c Client) newUploadID(bucketName, objectName string, metaData map[string][
}
// Initiate multipart upload for an object.
- initMultipartUploadResult, err := c.initiateMultipartUpload(bucketName, objectName, metaData)
+ initMultipartUploadResult, err := c.initiateMultipartUpload(ctx, bucketName, objectName, opts)
if err != nil {
return "", err
}
diff --git a/api-put-object-context.go b/api-put-object-context.go
new file mode 100644
index 0000000..a6f23dc
--- /dev/null
+++ b/api-put-object-context.go
@@ -0,0 +1,39 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+ "io"
+)
+
+// PutObjectWithContext - Identical to PutObject call, but accepts context to facilitate request cancellation.
+func (c Client) PutObjectWithContext(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64,
+ opts PutObjectOptions) (n int64, err error) {
+ err = opts.validate()
+ if err != nil {
+ return 0, err
+ }
+ if opts.EncryptMaterials != nil {
+ if err = opts.EncryptMaterials.SetupEncryptMode(reader); err != nil {
+ return 0, err
+ }
+ return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, opts.EncryptMaterials, opts)
+ }
+ return c.putObjectCommon(ctx, bucketName, objectName, reader, objectSize, opts)
+}
diff --git a/api-put-object-copy.go b/api-put-object-copy.go
index 32fa873..8032009 100644
--- a/api-put-object-copy.go
+++ b/api-put-object-copy.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/api-put-object-encrypted.go b/api-put-object-encrypted.go
index 141b3e9..87dd1ab 100644
--- a/api-put-object-encrypted.go
+++ b/api-put-object-encrypted.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,13 +18,14 @@
package minio
import (
+ "context"
"io"
"github.com/minio/minio-go/pkg/encrypt"
)
// PutEncryptedObject - Encrypt and store object.
-func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials encrypt.Materials, metadata map[string][]string, progress io.Reader) (n int64, err error) {
+func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials encrypt.Materials) (n int64, err error) {
if encryptMaterials == nil {
return 0, ErrInvalidArgument("Unable to recognize empty encryption properties")
@@ -33,14 +35,10 @@ func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Read
return 0, err
}
- if metadata == nil {
- metadata = make(map[string][]string)
- }
-
- // Set the necessary encryption headers, for future decryption.
- metadata[amzHeaderIV] = []string{encryptMaterials.GetIV()}
- metadata[amzHeaderKey] = []string{encryptMaterials.GetKey()}
- metadata[amzHeaderMatDesc] = []string{encryptMaterials.GetDesc()}
+ return c.PutObjectWithContext(context.Background(), bucketName, objectName, reader, -1, PutObjectOptions{EncryptMaterials: encryptMaterials})
+}
- return c.putObjectMultipart(bucketName, objectName, encryptMaterials, -1, metadata, progress)
+// FPutEncryptedObject - Encrypt and store an object with contents from file at filePath.
+func (c Client) FPutEncryptedObject(bucketName, objectName, filePath string, encryptMaterials encrypt.Materials) (n int64, err error) {
+ return c.FPutObjectWithContext(context.Background(), bucketName, objectName, filePath, PutObjectOptions{EncryptMaterials: encryptMaterials})
}
diff --git a/api-put-object-file-context.go b/api-put-object-file-context.go
new file mode 100644
index 0000000..140a9c0
--- /dev/null
+++ b/api-put-object-file-context.go
@@ -0,0 +1,64 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "context"
+ "mime"
+ "os"
+ "path/filepath"
+
+ "github.com/minio/minio-go/pkg/s3utils"
+)
+
+// FPutObjectWithContext - Create an object in a bucket, with contents from file at filePath. Allows request cancellation.
+func (c Client) FPutObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts PutObjectOptions) (n int64, err error) {
+ // Input validation.
+ if err := s3utils.CheckValidBucketName(bucketName); err != nil {
+ return 0, err
+ }
+ if err := s3utils.CheckValidObjectName(objectName); err != nil {
+ return 0, err
+ }
+
+ // Open the referenced file.
+ fileReader, err := os.Open(filePath)
+ // If any error fail quickly here.
+ if err != nil {
+ return 0, err
+ }
+ defer fileReader.Close()
+
+ // Save the file stat.
+ fileStat, err := fileReader.Stat()
+ if err != nil {
+ return 0, err
+ }
+
+ // Save the file size.
+ fileSize := fileStat.Size()
+
+ // Set contentType based on filepath extension if not given or default
+ // value of "application/octet-stream" if the extension has no associated type.
+ if opts.ContentType == "" {
+ if opts.ContentType = mime.TypeByExtension(filepath.Ext(filePath)); opts.ContentType == "" {
+ opts.ContentType = "application/octet-stream"
+ }
+ }
+ return c.PutObjectWithContext(ctx, bucketName, objectName, fileReader, fileSize, opts)
+}
diff --git a/api-put-object-file.go b/api-put-object-file.go
index 81cdf5c..7c8e051 100644
--- a/api-put-object-file.go
+++ b/api-put-object-file.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,50 +18,10 @@
package minio
import (
- "mime"
- "os"
- "path/filepath"
-
- "github.com/minio/minio-go/pkg/s3utils"
+ "context"
)
-// FPutObject - Create an object in a bucket, with contents from file at filePath.
-func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) (n int64, err error) {
- // Input validation.
- if err := s3utils.CheckValidBucketName(bucketName); err != nil {
- return 0, err
- }
- if err := s3utils.CheckValidObjectName(objectName); err != nil {
- return 0, err
- }
-
- // Open the referenced file.
- fileReader, err := os.Open(filePath)
- // If any error fail quickly here.
- if err != nil {
- return 0, err
- }
- defer fileReader.Close()
-
- // Save the file stat.
- fileStat, err := fileReader.Stat()
- if err != nil {
- return 0, err
- }
-
- // Save the file size.
- fileSize := fileStat.Size()
-
- objMetadata := make(map[string][]string)
-
- // Set contentType based on filepath extension if not given or default
- // value of "binary/octet-stream" if the extension has no associated type.
- if contentType == "" {
- if contentType = mime.TypeByExtension(filepath.Ext(filePath)); contentType == "" {
- contentType = "application/octet-stream"
- }
- }
-
- objMetadata["Content-Type"] = []string{contentType}
- return c.putObjectCommon(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
+// FPutObject - Create an object in a bucket, with contents from file at filePath
+func (c Client) FPutObject(bucketName, objectName, filePath string, opts PutObjectOptions) (n int64, err error) {
+ return c.FPutObjectWithContext(context.Background(), bucketName, objectName, filePath, opts)
}
diff --git a/api-put-object-multipart.go b/api-put-object-multipart.go
index 1938378..f5b8893 100644
--- a/api-put-object-multipart.go
+++ b/api-put-object-multipart.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,12 +19,16 @@ package minio
import (
"bytes"
+ "context"
+ "encoding/base64"
+ "encoding/hex"
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
+ "runtime/debug"
"sort"
"strconv"
"strings"
@@ -31,9 +36,9 @@ import (
"github.com/minio/minio-go/pkg/s3utils"
)
-func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64,
- metadata map[string][]string, progress io.Reader) (n int64, err error) {
- n, err = c.putObjectMultipartNoStream(bucketName, objectName, reader, size, metadata, progress)
+func (c Client) putObjectMultipart(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64,
+ opts PutObjectOptions) (n int64, err error) {
+ n, err = c.putObjectMultipartNoStream(ctx, bucketName, objectName, reader, opts)
if err != nil {
errResp := ToErrorResponse(err)
// Verify if multipart functionality is not available, if not
@@ -44,14 +49,13 @@ func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Read
return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
}
// Fall back to uploading as single PutObject operation.
- return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
+ return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts)
}
}
return n, err
}
-func (c Client) putObjectMultipartNoStream(bucketName, objectName string, reader io.Reader, size int64,
- metadata map[string][]string, progress io.Reader) (n int64, err error) {
+func (c Client) putObjectMultipartNoStream(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (n int64, err error) {
// Input validation.
if err = s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
@@ -68,85 +72,93 @@ func (c Client) putObjectMultipartNoStream(bucketName, objectName string, reader
var complMultipartUpload completeMultipartUpload
// Calculate the optimal parts info for a given size.
- totalPartsCount, partSize, _, err := optimalPartInfo(size)
+ totalPartsCount, partSize, _, err := optimalPartInfo(-1)
if err != nil {
return 0, err
}
// Initiate a new multipart upload.
- uploadID, err := c.newUploadID(bucketName, objectName, metadata)
+ uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
if err != nil {
return 0, err
}
defer func() {
if err != nil {
- c.abortMultipartUpload(bucketName, objectName, uploadID)
+ c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
}
}()
// Part number always starts with '1'.
partNumber := 1
- // Initialize a temporary buffer.
- tmpBuffer := new(bytes.Buffer)
-
// Initialize parts uploaded map.
partsInfo := make(map[int]ObjectPart)
+ // Create a buffer.
+ buf := make([]byte, partSize)
+ defer debug.FreeOSMemory()
+
for partNumber <= totalPartsCount {
// Choose hash algorithms to be calculated by hashCopyN,
// avoid sha256 with non-v4 signature request or
// HTTPS connection.
hashAlgos, hashSums := c.hashMaterials()
- // Calculates hash sums while copying partSize bytes into tmpBuffer.
- prtSize, rErr := hashCopyN(hashAlgos, hashSums, tmpBuffer, reader, partSize)
- if rErr != nil && rErr != io.EOF {
+ length, rErr := io.ReadFull(reader, buf)
+ if rErr == io.EOF {
+ break
+ }
+ if rErr != nil && rErr != io.ErrUnexpectedEOF {
return 0, rErr
}
- var reader io.Reader
+ // Calculates hash sums while copying partSize bytes into cw.
+ for k, v := range hashAlgos {
+ v.Write(buf[:length])
+ hashSums[k] = v.Sum(nil)
+ }
+
// Update progress reader appropriately to the latest offset
// as we read from the source.
- reader = newHook(tmpBuffer, progress)
+ rd := newHook(bytes.NewReader(buf[:length]), opts.Progress)
+
+ // Checksums..
+ var (
+ md5Base64 string
+ sha256Hex string
+ )
+ if hashSums["md5"] != nil {
+ md5Base64 = base64.StdEncoding.EncodeToString(hashSums["md5"])
+ }
+ if hashSums["sha256"] != nil {
+ sha256Hex = hex.EncodeToString(hashSums["sha256"])
+ }
// Proceed to upload the part.
var objPart ObjectPart
- objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber,
- hashSums["md5"], hashSums["sha256"], prtSize, metadata)
+ objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber,
+ md5Base64, sha256Hex, int64(length), opts.UserMetadata)
if err != nil {
- // Reset the temporary buffer upon any error.
- tmpBuffer.Reset()
return totalUploadedSize, err
}
// Save successfully uploaded part metadata.
partsInfo[partNumber] = objPart
- // Reset the temporary buffer.
- tmpBuffer.Reset()
-
// Save successfully uploaded size.
- totalUploadedSize += prtSize
+ totalUploadedSize += int64(length)
// Increment part number.
partNumber++
// For unknown size, Read EOF we break away.
// We do not have to upload till totalPartsCount.
- if size < 0 && rErr == io.EOF {
+ if rErr == io.EOF {
break
}
}
- // Verify if we uploaded all the data.
- if size > 0 {
- if totalUploadedSize != size {
- return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
- }
- }
-
// Loop over total uploaded parts to save them in
// Parts array before completing the multipart request.
for i := 1; i < partNumber; i++ {
@@ -162,7 +174,7 @@ func (c Client) putObjectMultipartNoStream(bucketName, objectName string, reader
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
- if _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload); err != nil {
+ if _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload); err != nil {
return totalUploadedSize, err
}
@@ -171,7 +183,7 @@ func (c Client) putObjectMultipartNoStream(bucketName, objectName string, reader
}
// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID.
-func (c Client) initiateMultipartUpload(bucketName, objectName string, metadata map[string][]string) (initiateMultipartUploadResult, error) {
+func (c Client) initiateMultipartUpload(ctx context.Context, bucketName, objectName string, opts PutObjectOptions) (initiateMultipartUploadResult, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return initiateMultipartUploadResult{}, err
@@ -185,17 +197,7 @@ func (c Client) initiateMultipartUpload(bucketName, objectName string, metadata
urlValues.Set("uploads", "")
// Set ContentType header.
- customHeader := make(http.Header)
- for k, v := range metadata {
- if len(v) > 0 {
- customHeader.Set(k, v[0])
- }
- }
-
- // Set a default content-type header if the latter is not provided
- if v, ok := metadata["Content-Type"]; !ok || len(v) == 0 {
- customHeader.Set("Content-Type", "application/octet-stream")
- }
+ customHeader := opts.Header()
reqMetadata := requestMetadata{
bucketName: bucketName,
@@ -205,7 +207,7 @@ func (c Client) initiateMultipartUpload(bucketName, objectName string, metadata
}
// Execute POST on an objectName to initiate multipart upload.
- resp, err := c.executeMethod("POST", reqMetadata)
+ resp, err := c.executeMethod(ctx, "POST", reqMetadata)
defer closeResponse(resp)
if err != nil {
return initiateMultipartUploadResult{}, err
@@ -227,8 +229,8 @@ func (c Client) initiateMultipartUpload(bucketName, objectName string, metadata
const serverEncryptionKeyPrefix = "x-amz-server-side-encryption"
// uploadPart - Uploads a part in a multipart upload.
-func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Reader,
- partNumber int, md5Sum, sha256Sum []byte, size int64, metadata map[string][]string) (ObjectPart, error) {
+func (c Client) uploadPart(ctx context.Context, bucketName, objectName, uploadID string, reader io.Reader,
+ partNumber int, md5Base64, sha256Hex string, size int64, metadata map[string]string) (ObjectPart, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ObjectPart{}, err
@@ -261,24 +263,24 @@ func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Re
for k, v := range metadata {
if len(v) > 0 {
if strings.HasPrefix(strings.ToLower(k), serverEncryptionKeyPrefix) {
- customHeader.Set(k, v[0])
+ customHeader.Set(k, v)
}
}
}
reqMetadata := requestMetadata{
- bucketName: bucketName,
- objectName: objectName,
- queryValues: urlValues,
- customHeader: customHeader,
- contentBody: reader,
- contentLength: size,
- contentMD5Bytes: md5Sum,
- contentSHA256Bytes: sha256Sum,
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ customHeader: customHeader,
+ contentBody: reader,
+ contentLength: size,
+ contentMD5Base64: md5Base64,
+ contentSHA256Hex: sha256Hex,
}
// Execute PUT on each part.
- resp, err := c.executeMethod("PUT", reqMetadata)
+ resp, err := c.executeMethod(ctx, "PUT", reqMetadata)
defer closeResponse(resp)
if err != nil {
return ObjectPart{}, err
@@ -299,7 +301,7 @@ func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Re
}
// completeMultipartUpload - Completes a multipart upload by assembling previously uploaded parts.
-func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string,
+func (c Client) completeMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string,
complete completeMultipartUpload) (completeMultipartUploadResult, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
@@ -312,7 +314,6 @@ func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string,
// Initialize url queries.
urlValues := make(url.Values)
urlValues.Set("uploadId", uploadID)
-
// Marshal complete multipart body.
completeMultipartUploadBytes, err := xml.Marshal(complete)
if err != nil {
@@ -322,16 +323,16 @@ func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string,
// Instantiate all the complete multipart buffer.
completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes)
reqMetadata := requestMetadata{
- bucketName: bucketName,
- objectName: objectName,
- queryValues: urlValues,
- contentBody: completeMultipartUploadBuffer,
- contentLength: int64(len(completeMultipartUploadBytes)),
- contentSHA256Bytes: sum256(completeMultipartUploadBytes),
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ contentBody: completeMultipartUploadBuffer,
+ contentLength: int64(len(completeMultipartUploadBytes)),
+ contentSHA256Hex: sum256Hex(completeMultipartUploadBytes),
}
// Execute POST to complete multipart upload for an objectName.
- resp, err := c.executeMethod("POST", reqMetadata)
+ resp, err := c.executeMethod(ctx, "POST", reqMetadata)
defer closeResponse(resp)
if err != nil {
return completeMultipartUploadResult{}, err
diff --git a/api-put-object-streaming.go b/api-put-object-streaming.go
index 0d4639e..579cb54 100644
--- a/api-put-object-streaming.go
+++ b/api-put-object-streaming.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,6 +18,7 @@
package minio
import (
+ "context"
"fmt"
"io"
"net/http"
@@ -26,33 +28,23 @@ import (
"github.com/minio/minio-go/pkg/s3utils"
)
-// PutObjectStreaming using AWS streaming signature V4
-func (c Client) PutObjectStreaming(bucketName, objectName string, reader io.Reader) (n int64, err error) {
- return c.PutObjectWithProgress(bucketName, objectName, reader, nil, nil)
-}
-
// putObjectMultipartStream - upload a large object using
// multipart upload and streaming signature for signing payload.
// Comprehensive put object operation involving multipart uploads.
//
// Following code handles these types of readers.
//
-// - *os.File
// - *minio.Object
// - Any reader which has a method 'ReadAt()'
//
-func (c Client) putObjectMultipartStream(bucketName, objectName string,
- reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {
-
- // Verify if reader is *minio.Object, *os.File or io.ReaderAt.
- // NOTE: Verification of object is kept for a specific purpose
- // while it is going to be duck typed similar to io.ReaderAt.
- // It is to indicate that *minio.Object implements io.ReaderAt.
- // and such a functionality is used in the subsequent code path.
- if isFile(reader) || !isObject(reader) && isReadAt(reader) {
- n, err = c.putObjectMultipartStreamFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, metadata, progress)
+func (c Client) putObjectMultipartStream(ctx context.Context, bucketName, objectName string,
+ reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) {
+
+ if !isObject(reader) && isReadAt(reader) {
+ // Verify if the reader implements ReadAt and it is not a *minio.Object then we will use parallel uploader.
+ n, err = c.putObjectMultipartStreamFromReadAt(ctx, bucketName, objectName, reader.(io.ReaderAt), size, opts)
} else {
- n, err = c.putObjectMultipartStreamNoChecksum(bucketName, objectName, reader, size, metadata, progress)
+ n, err = c.putObjectMultipartStreamNoChecksum(ctx, bucketName, objectName, reader, size, opts)
}
if err != nil {
errResp := ToErrorResponse(err)
@@ -64,7 +56,7 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string,
return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
}
// Fall back to uploading as single PutObject operation.
- return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
+ return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts)
}
}
return n, err
@@ -94,8 +86,8 @@ type uploadPartReq struct {
// temporary files for staging all the data, these temporary files are
// cleaned automatically when the caller i.e http client closes the
// stream after uploading all the contents successfully.
-func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string,
- reader io.ReaderAt, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {
+func (c Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketName, objectName string,
+ reader io.ReaderAt, size int64, opts PutObjectOptions) (n int64, err error) {
// Input validation.
if err = s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
@@ -111,7 +103,7 @@ func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string
}
// Initiate a new multipart upload.
- uploadID, err := c.newUploadID(bucketName, objectName, metadata)
+ uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
if err != nil {
return 0, err
}
@@ -122,7 +114,7 @@ func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string
// to relinquish storage space.
defer func() {
if err != nil {
- c.abortMultipartUpload(bucketName, objectName, uploadID)
+ c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
}
}()
@@ -150,10 +142,9 @@ func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string
uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil}
}
close(uploadPartsCh)
-
// Receive each part number from the channel allowing three parallel uploads.
- for w := 1; w <= totalWorkers; w++ {
- go func() {
+ for w := 1; w <= opts.getNumThreads(); w++ {
+ go func(partSize int64) {
// Each worker will draw from the part channel and upload in parallel.
for uploadReq := range uploadPartsCh {
@@ -170,13 +161,13 @@ func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string
}
// Get a section reader on a particular offset.
- sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), progress)
+ sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), opts.Progress)
// Proceed to upload the part.
var objPart ObjectPart
- objPart, err = c.uploadPart(bucketName, objectName, uploadID,
+ objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID,
sectionReader, uploadReq.PartNum,
- nil, nil, partSize, metadata)
+ "", "", partSize, opts.UserMetadata)
if err != nil {
uploadedPartsCh <- uploadedPartRes{
Size: 0,
@@ -197,7 +188,7 @@ func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string
Error: nil,
}
}
- }()
+ }(partSize)
}
// Gather the responses as they occur and update any
@@ -229,7 +220,7 @@ func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
- _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
+ _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload)
if err != nil {
return totalUploadedSize, err
}
@@ -238,8 +229,8 @@ func (c Client) putObjectMultipartStreamFromReadAt(bucketName, objectName string
return totalUploadedSize, nil
}
-func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string,
- reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {
+func (c Client) putObjectMultipartStreamNoChecksum(ctx context.Context, bucketName, objectName string,
+ reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) {
// Input validation.
if err = s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
@@ -253,9 +244,8 @@ func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string
if err != nil {
return 0, err
}
-
// Initiates a new multipart request
- uploadID, err := c.newUploadID(bucketName, objectName, metadata)
+ uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
if err != nil {
return 0, err
}
@@ -266,7 +256,7 @@ func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string
// storage space.
defer func() {
if err != nil {
- c.abortMultipartUpload(bucketName, objectName, uploadID)
+ c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
}
}()
@@ -281,17 +271,16 @@ func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string
for partNumber = 1; partNumber <= totalPartsCount; partNumber++ {
// Update progress reader appropriately to the latest offset
// as we read from the source.
- hookReader := newHook(reader, progress)
+ hookReader := newHook(reader, opts.Progress)
// Proceed to upload the part.
if partNumber == totalPartsCount {
partSize = lastPartSize
}
-
var objPart ObjectPart
- objPart, err = c.uploadPart(bucketName, objectName, uploadID,
+ objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID,
io.LimitReader(hookReader, partSize),
- partNumber, nil, nil, partSize, metadata)
+ partNumber, "", "", partSize, opts.UserMetadata)
if err != nil {
return totalUploadedSize, err
}
@@ -328,7 +317,7 @@ func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
- _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
+ _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload)
if err != nil {
return totalUploadedSize, err
}
@@ -339,7 +328,7 @@ func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string
// putObjectNoChecksum special function used Google Cloud Storage. This special function
// is used for Google Cloud Storage since Google's multipart API is not S3 compatible.
-func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
+func (c Client) putObjectNoChecksum(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return 0, err
@@ -355,17 +344,22 @@ func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Rea
}
if size > 0 {
if isReadAt(reader) && !isObject(reader) {
- reader = io.NewSectionReader(reader.(io.ReaderAt), 0, size)
+ seeker, _ := reader.(io.Seeker)
+ offset, err := seeker.Seek(0, io.SeekCurrent)
+ if err != nil {
+ return 0, ErrInvalidArgument(err.Error())
+ }
+ reader = io.NewSectionReader(reader.(io.ReaderAt), offset, size)
}
}
// Update progress reader appropriately to the latest offset as we
// read from the source.
- readSeeker := newHook(reader, progress)
+ readSeeker := newHook(reader, opts.Progress)
// This function does not calculate sha256 and md5sum for payload.
// Execute put object.
- st, err := c.putObjectDo(bucketName, objectName, readSeeker, nil, nil, size, metaData)
+ st, err := c.putObjectDo(ctx, bucketName, objectName, readSeeker, "", "", size, opts)
if err != nil {
return 0, err
}
@@ -377,7 +371,7 @@ func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Rea
// putObjectDo - executes the put object http operation.
// NOTE: You must have WRITE permissions on a bucket to add an object to it.
-func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, metaData map[string][]string) (ObjectInfo, error) {
+func (c Client) putObjectDo(ctx context.Context, bucketName, objectName string, reader io.Reader, md5Base64, sha256Hex string, size int64, opts PutObjectOptions) (ObjectInfo, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ObjectInfo{}, err
@@ -385,35 +379,22 @@ func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return ObjectInfo{}, err
}
-
// Set headers.
- customHeader := make(http.Header)
-
- // Set metadata to headers
- for k, v := range metaData {
- if len(v) > 0 {
- customHeader.Set(k, v[0])
- }
- }
-
- // If Content-Type is not provided, set the default application/octet-stream one
- if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 {
- customHeader.Set("Content-Type", "application/octet-stream")
- }
+ customHeader := opts.Header()
// Populate request metadata.
reqMetadata := requestMetadata{
- bucketName: bucketName,
- objectName: objectName,
- customHeader: customHeader,
- contentBody: reader,
- contentLength: size,
- contentMD5Bytes: md5Sum,
- contentSHA256Bytes: sha256Sum,
+ bucketName: bucketName,
+ objectName: objectName,
+ customHeader: customHeader,
+ contentBody: reader,
+ contentLength: size,
+ contentMD5Base64: md5Base64,
+ contentSHA256Hex: sha256Hex,
}
// Execute PUT an objectName.
- resp, err := c.executeMethod("PUT", reqMetadata)
+ resp, err := c.executeMethod(ctx, "PUT", reqMetadata)
defer closeResponse(resp)
if err != nil {
return ObjectInfo{}, err
diff --git a/api-put-object.go b/api-put-object.go
index 2ea4987..1fda1bc 100644
--- a/api-put-object.go
+++ b/api-put-object.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,117 +18,84 @@
package minio
import (
+ "bytes"
+ "context"
+ "fmt"
"io"
- "os"
- "reflect"
- "runtime"
- "strings"
+ "net/http"
+ "runtime/debug"
+ "sort"
- "github.com/minio/minio-go/pkg/credentials"
+ "github.com/minio/minio-go/pkg/encrypt"
"github.com/minio/minio-go/pkg/s3utils"
)
-// toInt - converts go value to its integer representation based
-// on the value kind if it is an integer.
-func toInt(value reflect.Value) (size int64) {
- size = -1
- if value.IsValid() {
- switch value.Kind() {
- case reflect.Int:
- fallthrough
- case reflect.Int8:
- fallthrough
- case reflect.Int16:
- fallthrough
- case reflect.Int32:
- fallthrough
- case reflect.Int64:
- size = value.Int()
- }
+// PutObjectOptions represents options specified by user for PutObject call
+type PutObjectOptions struct {
+ UserMetadata map[string]string
+ Progress io.Reader
+ ContentType string
+ ContentEncoding string
+ ContentDisposition string
+ CacheControl string
+ EncryptMaterials encrypt.Materials
+ NumThreads uint
+}
+
+// getNumThreads - gets the number of threads to be used in the multipart
+// put object operation
+func (opts PutObjectOptions) getNumThreads() (numThreads int) {
+ if opts.NumThreads > 0 {
+ numThreads = int(opts.NumThreads)
+ } else {
+ numThreads = totalWorkers
}
- return size
+ return
}
-// getReaderSize - Determine the size of Reader if available.
-func getReaderSize(reader io.Reader) (size int64, err error) {
- size = -1
- if reader == nil {
- return -1, nil
- }
- // Verify if there is a method by name 'Size'.
- sizeFn := reflect.ValueOf(reader).MethodByName("Size")
- // Verify if there is a method by name 'Len'.
- lenFn := reflect.ValueOf(reader).MethodByName("Len")
- if sizeFn.IsValid() {
- if sizeFn.Kind() == reflect.Func {
- // Call the 'Size' function and save its return value.
- result := sizeFn.Call([]reflect.Value{})
- if len(result) == 1 {
- size = toInt(result[0])
- }
- }
- } else if lenFn.IsValid() {
- if lenFn.Kind() == reflect.Func {
- // Call the 'Len' function and save its return value.
- result := lenFn.Call([]reflect.Value{})
- if len(result) == 1 {
- size = toInt(result[0])
- }
- }
+// Header - constructs the headers from metadata entered by user in
+// PutObjectOptions struct
+func (opts PutObjectOptions) Header() (header http.Header) {
+ header = make(http.Header)
+
+ if opts.ContentType != "" {
+ header["Content-Type"] = []string{opts.ContentType}
} else {
- // Fallback to Stat() method, two possible Stat() structs exist.
- switch v := reader.(type) {
- case *os.File:
- var st os.FileInfo
- st, err = v.Stat()
- if err != nil {
- // Handle this case specially for "windows",
- // certain files for example 'Stdin', 'Stdout' and
- // 'Stderr' it is not allowed to fetch file information.
- if runtime.GOOS == "windows" {
- if strings.Contains(err.Error(), "GetFileInformationByHandle") {
- return -1, nil
- }
- }
- return
- }
- // Ignore if input is a directory, throw an error.
- if st.Mode().IsDir() {
- return -1, ErrInvalidArgument("Input file cannot be a directory.")
- }
- // Ignore 'Stdin', 'Stdout' and 'Stderr', since they
- // represent *os.File type but internally do not
- // implement Seekable calls. Ignore them and treat
- // them like a stream with unknown length.
- switch st.Name() {
- case "stdin", "stdout", "stderr":
- return
- // Ignore read/write stream of os.Pipe() which have unknown length too.
- case "|0", "|1":
- return
- }
- var pos int64
- pos, err = v.Seek(0, 1) // SeekCurrent.
- if err != nil {
- return -1, err
- }
- size = st.Size() - pos
- case *Object:
- var st ObjectInfo
- st, err = v.Stat()
- if err != nil {
- return
- }
- var pos int64
- pos, err = v.Seek(0, 1) // SeekCurrent.
- if err != nil {
- return -1, err
- }
- size = st.Size - pos
+ header["Content-Type"] = []string{"application/octet-stream"}
+ }
+ if opts.ContentEncoding != "" {
+ header["Content-Encoding"] = []string{opts.ContentEncoding}
+ }
+ if opts.ContentDisposition != "" {
+ header["Content-Disposition"] = []string{opts.ContentDisposition}
+ }
+ if opts.CacheControl != "" {
+ header["Cache-Control"] = []string{opts.CacheControl}
+ }
+ if opts.EncryptMaterials != nil {
+ header[amzHeaderIV] = []string{opts.EncryptMaterials.GetIV()}
+ header[amzHeaderKey] = []string{opts.EncryptMaterials.GetKey()}
+ header[amzHeaderMatDesc] = []string{opts.EncryptMaterials.GetDesc()}
+ }
+ for k, v := range opts.UserMetadata {
+ if !isAmzHeader(k) && !isStandardHeader(k) && !isSSEHeader(k) {
+ header["X-Amz-Meta-"+k] = []string{v}
+ } else {
+ header[k] = []string{v}
+ }
+ }
+ return
+}
+
+// validate() checks if the UserMetadata map has standard headers or client side
+// encryption headers and raises an error if so.
+func (opts PutObjectOptions) validate() (err error) {
+ for k := range opts.UserMetadata {
+ if isStandardHeader(k) || isCSEHeader(k) {
+ return ErrInvalidArgument(k + " unsupported request parameter for user defined metadata")
}
}
- // Returns the size here.
- return size, err
+ return nil
}
// completedParts is a collection of parts sortable by their part numbers.
@@ -149,39 +117,12 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part
// - For size input as -1 PutObject does a multipart Put operation
// until input stream reaches EOF. Maximum object size that can
// be uploaded through this operation will be 5TiB.
-func (c Client) PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int64, err error) {
- return c.PutObjectWithMetadata(bucketName, objectName, reader, map[string][]string{
- "Content-Type": []string{contentType},
- }, nil)
+func (c Client) PutObject(bucketName, objectName string, reader io.Reader, objectSize int64,
+ opts PutObjectOptions) (n int64, err error) {
+ return c.PutObjectWithContext(context.Background(), bucketName, objectName, reader, objectSize, opts)
}
-// PutObjectWithSize - is a helper PutObject similar in behavior to PutObject()
-// but takes the size argument explicitly, this function avoids doing reflection
-// internally to figure out the size of input stream. Also if the input size is
-// lesser than 0 this function returns an error.
-func (c Client) PutObjectWithSize(bucketName, objectName string, reader io.Reader, readerSize int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {
- return c.putObjectCommon(bucketName, objectName, reader, readerSize, metadata, progress)
-}
-
-// PutObjectWithMetadata using AWS streaming signature V4
-func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) {
- return c.PutObjectWithProgress(bucketName, objectName, reader, metadata, progress)
-}
-
-// PutObjectWithProgress using AWS streaming signature V4
-func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) {
- // Size of the object.
- var size int64
-
- // Get reader size.
- size, err = getReaderSize(reader)
- if err != nil {
- return 0, err
- }
- return c.putObjectCommon(bucketName, objectName, reader, size, metadata, progress)
-}
-
-func (c Client) putObjectCommon(bucketName, objectName string, reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (n int64, err error) {
+func (c Client) putObjectCommon(ctx context.Context, bucketName, objectName string, reader io.Reader, size int64, opts PutObjectOptions) (n int64, err error) {
// Check for largest object size allowed.
if size > int64(maxMultipartPutObjectSize) {
return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName)
@@ -190,29 +131,124 @@ func (c Client) putObjectCommon(bucketName, objectName string, reader io.Reader,
// NOTE: Streaming signature is not supported by GCS.
if s3utils.IsGoogleEndpoint(c.endpointURL) {
// Do not compute MD5 for Google Cloud Storage.
- return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
+ return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts)
}
if c.overrideSignerType.IsV2() {
- if size > 0 && size < minPartSize {
- return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
+ if size >= 0 && size < minPartSize {
+ return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts)
}
- return c.putObjectMultipart(bucketName, objectName, reader, size, metadata, progress)
+ return c.putObjectMultipart(ctx, bucketName, objectName, reader, size, opts)
}
-
- // If size cannot be found on a stream, it is not possible
- // to upload using streaming signature, fall back to multipart.
if size < 0 {
- return c.putObjectMultipart(bucketName, objectName, reader, size, metadata, progress)
+ return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts)
}
- // Set streaming signature.
- c.overrideSignerType = credentials.SignatureV4Streaming
-
if size < minPartSize {
- return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress)
+ return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts)
}
-
// For all sizes greater than 64MiB do multipart.
- return c.putObjectMultipartStream(bucketName, objectName, reader, size, metadata, progress)
+ return c.putObjectMultipartStream(ctx, bucketName, objectName, reader, size, opts)
+}
+
+func (c Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketName, objectName string, reader io.Reader, opts PutObjectOptions) (n int64, err error) {
+ // Input validation.
+ if err = s3utils.CheckValidBucketName(bucketName); err != nil {
+ return 0, err
+ }
+ if err = s3utils.CheckValidObjectName(objectName); err != nil {
+ return 0, err
+ }
+
+ // Total data read and written to server. should be equal to
+ // 'size' at the end of the call.
+ var totalUploadedSize int64
+
+ // Complete multipart upload.
+ var complMultipartUpload completeMultipartUpload
+
+ // Calculate the optimal parts info for a given size.
+ totalPartsCount, partSize, _, err := optimalPartInfo(-1)
+ if err != nil {
+ return 0, err
+ }
+ // Initiate a new multipart upload.
+ uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts)
+ if err != nil {
+ return 0, err
+ }
+
+ defer func() {
+ if err != nil {
+ c.abortMultipartUpload(ctx, bucketName, objectName, uploadID)
+ }
+ }()
+
+ // Part number always starts with '1'.
+ partNumber := 1
+
+ // Initialize parts uploaded map.
+ partsInfo := make(map[int]ObjectPart)
+
+ // Create a buffer.
+ buf := make([]byte, partSize)
+ defer debug.FreeOSMemory()
+
+ for partNumber <= totalPartsCount {
+ length, rErr := io.ReadFull(reader, buf)
+ if rErr == io.EOF && partNumber > 1 {
+ break
+ }
+ if rErr != nil && rErr != io.ErrUnexpectedEOF {
+ return 0, rErr
+ }
+ // Update progress reader appropriately to the latest offset
+ // as we read from the source.
+ rd := newHook(bytes.NewReader(buf[:length]), opts.Progress)
+
+ // Proceed to upload the part.
+ var objPart ObjectPart
+ objPart, err = c.uploadPart(ctx, bucketName, objectName, uploadID, rd, partNumber,
+ "", "", int64(length), opts.UserMetadata)
+ if err != nil {
+ return totalUploadedSize, err
+ }
+
+ // Save successfully uploaded part metadata.
+ partsInfo[partNumber] = objPart
+
+ // Save successfully uploaded size.
+ totalUploadedSize += int64(length)
+
+ // Increment part number.
+ partNumber++
+
+ // For unknown size, Read EOF we break away.
+ // We do not have to upload till totalPartsCount.
+ if rErr == io.EOF {
+ break
+ }
+ }
+
+ // Loop over total uploaded parts to save them in
+ // Parts array before completing the multipart request.
+ for i := 1; i < partNumber; i++ {
+ part, ok := partsInfo[i]
+ if !ok {
+ return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i))
+ }
+ complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{
+ ETag: part.ETag,
+ PartNumber: part.PartNumber,
+ })
+ }
+
+ // Sort all completed parts.
+ sort.Sort(completedParts(complMultipartUpload.Parts))
+ if _, err = c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload); err != nil {
+ return totalUploadedSize, err
+ }
+
+ // Return final size.
+ return totalUploadedSize, nil
}
diff --git a/api-put-object_test.go b/api-put-object_test.go
new file mode 100644
index 0000000..e0557e2
--- /dev/null
+++ b/api-put-object_test.go
@@ -0,0 +1,53 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package minio
+
+import (
+ "testing"
+)
+
+func TestPutObjectOptionsValidate(t *testing.T) {
+ testCases := []struct {
+ metadata map[string]string
+ shouldPass bool
+ }{
+ {map[string]string{"Content-Type": "custom/content-type"}, false},
+ {map[string]string{"content-type": "custom/content-type"}, false},
+ {map[string]string{"Content-Encoding": "gzip"}, false},
+ {map[string]string{"Cache-Control": "blah"}, false},
+ {map[string]string{"Content-Disposition": "something"}, false},
+ {map[string]string{"my-custom-header": "blah"}, true},
+ {map[string]string{"X-Amz-Iv": "blah"}, false},
+ {map[string]string{"X-Amz-Key": "blah"}, false},
+ {map[string]string{"X-Amz-Key-prefixed-header": "blah"}, false},
+ {map[string]string{"custom-X-Amz-Key-middle": "blah"}, true},
+ {map[string]string{"my-custom-header-X-Amz-Key": "blah"}, true},
+ {map[string]string{"X-Amz-Matdesc": "blah"}, false},
+ {map[string]string{"blah-X-Amz-Matdesc": "blah"}, true},
+ {map[string]string{"X-Amz-MatDesc-suffix": "blah"}, true},
+ {map[string]string{"x-amz-meta-X-Amz-Iv": "blah"}, false},
+ {map[string]string{"x-amz-meta-X-Amz-Key": "blah"}, false},
+ {map[string]string{"x-amz-meta-X-Amz-Matdesc": "blah"}, false},
+ }
+ for i, testCase := range testCases {
+ err := PutObjectOptions{UserMetadata: testCase.metadata}.validate()
+
+ if testCase.shouldPass && err != nil {
+ t.Errorf("Test %d - output did not match with reference results", i+1)
+ }
+ }
+}
diff --git a/api-remove.go b/api-remove.go
index 3574cbc..f14b2eb 100644
--- a/api-remove.go
+++ b/api-remove.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,6 +19,7 @@ package minio
import (
"bytes"
+ "context"
"encoding/xml"
"io"
"net/http"
@@ -36,9 +38,9 @@ func (c Client) RemoveBucket(bucketName string) error {
return err
}
// Execute DELETE on bucket.
- resp, err := c.executeMethod("DELETE", requestMetadata{
- bucketName: bucketName,
- contentSHA256Bytes: emptySHA256,
+ resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{
+ bucketName: bucketName,
+ contentSHA256Hex: emptySHA256Hex,
})
defer closeResponse(resp)
if err != nil {
@@ -66,10 +68,10 @@ func (c Client) RemoveObject(bucketName, objectName string) error {
return err
}
// Execute DELETE on objectName.
- resp, err := c.executeMethod("DELETE", requestMetadata{
- bucketName: bucketName,
- objectName: objectName,
- contentSHA256Bytes: emptySHA256,
+ resp, err := c.executeMethod(context.Background(), "DELETE", requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ contentSHA256Hex: emptySHA256Hex,
})
defer closeResponse(resp)
if err != nil {
@@ -187,13 +189,13 @@ func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan
// Generate remove multi objects XML request
removeBytes := generateRemoveMultiObjectsRequest(batch)
// Execute GET on bucket to list objects.
- resp, err := c.executeMethod("POST", requestMetadata{
- bucketName: bucketName,
- queryValues: urlValues,
- contentBody: bytes.NewReader(removeBytes),
- contentLength: int64(len(removeBytes)),
- contentMD5Bytes: sumMD5(removeBytes),
- contentSHA256Bytes: sum256(removeBytes),
+ resp, err := c.executeMethod(context.Background(), "POST", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentBody: bytes.NewReader(removeBytes),
+ contentLength: int64(len(removeBytes)),
+ contentMD5Base64: sumMD5Base64(removeBytes),
+ contentSHA256Hex: sum256Hex(removeBytes),
})
if err != nil {
for _, b := range batch {
@@ -227,7 +229,7 @@ func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {
}
if uploadID != "" {
// Upload id found, abort the incomplete multipart upload.
- err := c.abortMultipartUpload(bucketName, objectName, uploadID)
+ err := c.abortMultipartUpload(context.Background(), bucketName, objectName, uploadID)
if err != nil {
return err
}
@@ -237,7 +239,7 @@ func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {
// abortMultipartUpload aborts a multipart upload for the given
// uploadID, all previously uploaded parts are deleted.
-func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) error {
+func (c Client) abortMultipartUpload(ctx context.Context, bucketName, objectName, uploadID string) error {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return err
@@ -251,11 +253,11 @@ func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) er
urlValues.Set("uploadId", uploadID)
// Execute DELETE on multipart upload.
- resp, err := c.executeMethod("DELETE", requestMetadata{
- bucketName: bucketName,
- objectName: objectName,
- queryValues: urlValues,
- contentSHA256Bytes: emptySHA256,
+ resp, err := c.executeMethod(ctx, "DELETE", requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ queryValues: urlValues,
+ contentSHA256Hex: emptySHA256Hex,
})
defer closeResponse(resp)
if err != nil {
diff --git a/api-s3-datatypes.go b/api-s3-datatypes.go
index 4b29740..8d8880c 100644
--- a/api-s3-datatypes.go
+++ b/api-s3-datatypes.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -128,7 +129,7 @@ type initiator struct {
// copyObjectResult container for copy object response.
type copyObjectResult struct {
ETag string
- LastModified string // time string format "2006-01-02T15:04:05.000Z"
+ LastModified time.Time // time string format "2006-01-02T15:04:05.000Z"
}
// ObjectPart container for particular part of an object.
diff --git a/api-stat.go b/api-stat.go
index 4b53032..8904dd6 100644
--- a/api-stat.go
+++ b/api-stat.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,6 +18,7 @@
package minio
import (
+ "context"
"net/http"
"strconv"
"strings"
@@ -33,9 +35,9 @@ func (c Client) BucketExists(bucketName string) (bool, error) {
}
// Execute HEAD on bucketName.
- resp, err := c.executeMethod("HEAD", requestMetadata{
- bucketName: bucketName,
- contentSHA256Bytes: emptySHA256,
+ resp, err := c.executeMethod(context.Background(), "HEAD", requestMetadata{
+ bucketName: bucketName,
+ contentSHA256Hex: emptySHA256Hex,
})
defer closeResponse(resp)
if err != nil {
@@ -80,7 +82,7 @@ func extractObjMetadata(header http.Header) http.Header {
}
// StatObject verifies if object exists and you have permission to access.
-func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
+func (c Client) StatObject(bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ObjectInfo{}, err
@@ -88,12 +90,11 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
if err := s3utils.CheckValidObjectName(objectName); err != nil {
return ObjectInfo{}, err
}
- reqHeaders := NewHeadReqHeaders()
- return c.statObject(bucketName, objectName, reqHeaders)
+ return c.statObject(context.Background(), bucketName, objectName, opts)
}
// Lower level API for statObject supporting pre-conditions and range headers.
-func (c Client) statObject(bucketName, objectName string, reqHeaders RequestHeaders) (ObjectInfo, error) {
+func (c Client) statObject(ctx context.Context, bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) {
// Input validation.
if err := s3utils.CheckValidBucketName(bucketName); err != nil {
return ObjectInfo{}, err
@@ -102,17 +103,12 @@ func (c Client) statObject(bucketName, objectName string, reqHeaders RequestHead
return ObjectInfo{}, err
}
- customHeader := make(http.Header)
- for k, v := range reqHeaders.Header {
- customHeader[k] = v
- }
-
// Execute HEAD on objectName.
- resp, err := c.executeMethod("HEAD", requestMetadata{
- bucketName: bucketName,
- objectName: objectName,
- contentSHA256Bytes: emptySHA256,
- customHeader: customHeader,
+ resp, err := c.executeMethod(ctx, "HEAD", requestMetadata{
+ bucketName: bucketName,
+ objectName: objectName,
+ contentSHA256Hex: emptySHA256Hex,
+ customHeader: opts.Header(),
})
defer closeResponse(resp)
if err != nil {
@@ -167,11 +163,6 @@ func (c Client) statObject(bucketName, objectName string, reqHeaders RequestHead
contentType = "application/octet-stream"
}
- // Extract only the relevant header keys describing the object.
- // following function filters out a list of standard set of keys
- // which are not part of object metadata.
- metadata := extractObjMetadata(resp.Header)
-
// Save object metadata info.
return ObjectInfo{
ETag: md5sum,
@@ -179,6 +170,9 @@ func (c Client) statObject(bucketName, objectName string, reqHeaders RequestHead
Size: size,
LastModified: date,
ContentType: contentType,
- Metadata: metadata,
+ // Extract only the relevant header keys describing the object.
+ // following function filters out a list of standard set of keys
+ // which are not part of object metadata.
+ Metadata: extractObjMetadata(resp.Header),
}, nil
}
diff --git a/api.go b/api.go
index 6fe508a..9951d47 100644
--- a/api.go
+++ b/api.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2015, 2016, 2017 Minio, Inc.
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,10 +19,9 @@ package minio
import (
"bytes"
+ "context"
"crypto/md5"
"crypto/sha256"
- "encoding/base64"
- "encoding/hex"
"errors"
"fmt"
"hash"
@@ -87,7 +86,7 @@ type Client struct {
// Global constants.
const (
libraryName = "minio-go"
- libraryVersion = "3.0.0"
+ libraryVersion = "4.0.5"
)
// User Agent should always following the below style.
@@ -178,16 +177,29 @@ func (r *lockedRandSource) Seed(seed int64) {
r.lk.Unlock()
}
-// redirectHeaders copies all headers when following a redirect URL.
-// This won't be needed anymore from go 1.8 (https://github.com/golang/go/issues/4800)
-func redirectHeaders(req *http.Request, via []*http.Request) error {
- if len(via) == 0 {
- return nil
- }
- for key, val := range via[0].Header {
- req.Header[key] = val
- }
- return nil
+// getRegionFromURL - parse region from URL if present.
+func getRegionFromURL(u url.URL) (region string) {
+ region = ""
+ if s3utils.IsGoogleEndpoint(u) {
+ return
+ } else if s3utils.IsAmazonChinaEndpoint(u) {
+ // For china specifically we need to set everything to
+ // cn-north-1 for now, there is no easier way until AWS S3
+ // provides a cleaner compatible API across "us-east-1" and
+ // China region.
+ return "cn-north-1"
+ } else if s3utils.IsAmazonGovCloudEndpoint(u) {
+ // For us-gov specifically we need to set everything to
+ // us-gov-west-1 for now, there is no easier way until AWS S3
+ // provides a cleaner compatible API across "us-east-1" and
+ // Gov cloud region.
+ return "us-gov-west-1"
+ }
+ parts := s3utils.AmazonS3Host.FindStringSubmatch(u.Host)
+ if len(parts) > 1 {
+ region = parts[1]
+ }
+ return region
}
func privateNew(endpoint string, creds *credentials.Credentials, secure bool, region string) (*Client, error) {
@@ -211,11 +223,13 @@ func privateNew(endpoint string, creds *credentials.Credentials, secure bool, re
// Instantiate http client and bucket location cache.
clnt.httpClient = &http.Client{
- Transport: http.DefaultTransport,
- CheckRedirect: redirectHeaders,
+ Transport: defaultMinioTransport,
}
// Sets custom region, if region is empty bucket location cache is used automatically.
+ if region == "" {
+ region = getRegionFromURL(clnt.endpointURL)
+ }
clnt.region = region
// Instantiate bucket location cache.
@@ -328,11 +342,11 @@ type requestMetadata struct {
expires int64
// Generated by our internal code.
- bucketLocation string
- contentBody io.Reader
- contentLength int64
- contentSHA256Bytes []byte
- contentMD5Bytes []byte
+ bucketLocation string
+ contentBody io.Reader
+ contentLength int64
+ contentMD5Base64 string // carries base64 encoded md5sum
+ contentSHA256Hex string // carries hex encoded sha256sum
}
// dumpHTTP - dump HTTP request and response.
@@ -466,9 +480,11 @@ var successStatus = []int{
// executeMethod - instantiates a given method, and retries the
// request upon any error up to maxRetries attempts in a binomially
// delayed manner using a standard back off algorithm.
-func (c Client) executeMethod(method string, metadata requestMetadata) (res *http.Response, err error) {
+func (c Client) executeMethod(ctx context.Context, method string, metadata requestMetadata) (res *http.Response, err error) {
var isRetryable bool // Indicates if request can be retried.
var bodySeeker io.Seeker // Extracted seeker from io.Reader.
+ var reqRetry = MaxRetry // Indicates how many times we can retry the request
+
if metadata.contentBody != nil {
// Check if body is seekable then it is retryable.
bodySeeker, isRetryable = metadata.contentBody.(io.Seeker)
@@ -476,6 +492,11 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt
case os.Stdin, os.Stdout, os.Stderr:
isRetryable = false
}
+ // Retry only when reader is seekable
+ if !isRetryable {
+ reqRetry = 1
+ }
+
// Figure out if the body can be closed - if yes
// we will definitely close it upon the function
// return.
@@ -494,7 +515,7 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt
// Blank indentifier is kept here on purpose since 'range' without
// blank identifiers is only supported since go1.4
// https://golang.org/doc/go1.4#forrange.
- for _ = range c.newRetryTimer(MaxRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) {
+ for range c.newRetryTimer(reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) {
// Retry executes the following function body if request has an
// error until maxRetries have been exhausted, retry attempts are
// performed after waiting for a given period of time in a
@@ -517,6 +538,8 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt
}
return nil, err
}
+ // Add context to request
+ req = req.WithContext(ctx)
// Initiate the request.
res, err = c.do(req)
@@ -562,9 +585,14 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt
// Additionally we should only retry if bucketLocation and custom
// region is empty.
if metadata.bucketLocation == "" && c.region == "" {
- if res.StatusCode == http.StatusBadRequest && errResponse.Region != "" {
- c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
- continue // Retry.
+ if errResponse.Code == "AuthorizationHeaderMalformed" || errResponse.Code == "InvalidRegion" {
+ if metadata.bucketName != "" && errResponse.Region != "" {
+ // Gather Cached location only if bucketName is present.
+ if _, cachedLocationError := c.bucketLocCache.Get(metadata.bucketName); cachedLocationError != false {
+ c.bucketLocCache.Set(metadata.bucketName, errResponse.Region)
+ continue // Retry.
+ }
+ }
}
}
@@ -687,8 +715,8 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
}
// set md5Sum for content protection.
- if metadata.contentMD5Bytes != nil {
- req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(metadata.contentMD5Bytes))
+ if len(metadata.contentMD5Base64) > 0 {
+ req.Header.Set("Content-Md5", metadata.contentMD5Base64)
}
// For anonymous requests just return.
@@ -700,14 +728,17 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
case signerType.IsV2():
// Add signature version '2' authorization header.
req = s3signer.SignV2(*req, accessKeyID, secretAccessKey)
- case signerType.IsStreamingV4() && method == "PUT":
+ case metadata.objectName != "" && method == "PUT" && metadata.customHeader.Get("X-Amz-Copy-Source") == "" && !c.secure:
+ // Streaming signature is used by default for a PUT object request. Additionally we also
+ // look if the initialized client is secure, if yes then we don't need to perform
+ // streaming signature.
req = s3signer.StreamingSignV4(req, accessKeyID,
secretAccessKey, sessionToken, location, metadata.contentLength, time.Now().UTC())
default:
// Set sha256 sum for signature calculation only with signature version '4'.
shaHeader := unsignedPayload
- if len(metadata.contentSHA256Bytes) > 0 {
- shaHeader = hex.EncodeToString(metadata.contentSHA256Bytes)
+ if metadata.contentSHA256Hex != "" {
+ shaHeader = metadata.contentSHA256Hex
}
req.Header.Set("X-Amz-Content-Sha256", shaHeader)
diff --git a/api_functional_v2_test.go b/api_functional_v2_test.go
deleted file mode 100644
index e81596e..0000000
--- a/api_functional_v2_test.go
+++ /dev/null
@@ -1,1470 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "bytes"
- "errors"
- "io"
- "io/ioutil"
- "log"
- "math/rand"
- "net/http"
- "net/url"
- "os"
- "reflect"
- "strings"
- "testing"
- "time"
-
- "github.com/minio/minio-go/pkg/policy"
-)
-
-// Tests bucket re-create errors.
-func TestMakeBucketErrorV2(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for short runs")
- }
- if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
- t.Skip("skipping region functional tests for non s3 runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := NewV2(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket in 'eu-west-1'.
- if err = c.MakeBucket(bucketName, "eu-west-1"); err != nil {
- t.Fatal("Error:", err, bucketName)
- }
- if err = c.MakeBucket(bucketName, "eu-west-1"); err == nil {
- t.Fatal("Error: make bucket should should fail for", bucketName)
- }
- // Verify valid error response from server.
- if ToErrorResponse(err).Code != "BucketAlreadyExists" &&
- ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" {
- t.Fatal("Error: Invalid error returned by server", err)
- }
- if err = c.RemoveBucket(bucketName); err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-}
-
-// Test get object reader to not throw error on being closed twice.
-func TestGetObjectClosedTwiceV2(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := NewV2(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Generate data more than 32K.
- buf := bytes.Repeat([]byte("h"), rand.Intn(1<<20)+32*1024)
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
-
- if n != int64(len(buf)) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
- }
-
- // Read the data back
- r, err := c.GetObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
-
- st, err := r.Stat()
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
- if st.Size != int64(len(buf)) {
- t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
- len(buf), st.Size)
- }
- if err := r.Close(); err != nil {
- t.Fatal("Error:", err)
- }
- if err := r.Close(); err == nil {
- t.Fatal("Error: object is already closed, should return error")
- }
-
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-}
-
-// Tests removing partially uploaded objects.
-func TestRemovePartiallyUploadedV2(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping function tests for short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := NewV2(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Enable tracing, write to stdout.
- // c.TraceOn(os.Stderr)
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- r := bytes.NewReader(bytes.Repeat([]byte("a"), 128*1024))
-
- reader, writer := io.Pipe()
- go func() {
- i := 0
- for i < 25 {
- _, cerr := io.CopyN(writer, r, 128*1024)
- if cerr != nil {
- t.Fatal("Error:", cerr, bucketName)
- }
- i++
- r.Seek(0, 0)
- }
- writer.CloseWithError(errors.New("proactively closed to be verified later"))
- }()
-
- objectName := bucketName + "-resumable"
- _, err = c.PutObject(bucketName, objectName, reader, "application/octet-stream")
- if err == nil {
- t.Fatal("Error: PutObject should fail.")
- }
- if err.Error() != "proactively closed to be verified later" {
- t.Fatal("Error:", err)
- }
- err = c.RemoveIncompleteUpload(bucketName, objectName)
- if err != nil {
- t.Fatal("Error:", err)
- }
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-}
-
-// Tests FPutObject hidden contentType setting
-func TestFPutObjectV2(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := NewV2(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Make a temp file with 11*1024*1024 bytes of data.
- file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- r := bytes.NewReader(bytes.Repeat([]byte("b"), 11*1024*1024))
- n, err := io.CopyN(file, r, 11*1024*1024)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != int64(11*1024*1024) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
- }
-
- // Close the file pro-actively for windows.
- err = file.Close()
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Set base object name
- objectName := bucketName + "FPutObject"
-
- // Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
- n, err = c.FPutObject(bucketName, objectName+"-standard", file.Name(), "application/octet-stream")
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != int64(11*1024*1024) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
- }
-
- // Perform FPutObject with no contentType provided (Expecting application/octet-stream)
- n, err = c.FPutObject(bucketName, objectName+"-Octet", file.Name(), "")
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != int64(11*1024*1024) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
- }
-
- // Add extension to temp file name
- fileName := file.Name()
- err = os.Rename(file.Name(), fileName+".gtar")
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Perform FPutObject with no contentType provided (Expecting application/x-gtar)
- n, err = c.FPutObject(bucketName, objectName+"-GTar", fileName+".gtar", "")
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != int64(11*1024*1024) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
- }
-
- // Check headers
- rStandard, err := c.StatObject(bucketName, objectName+"-standard")
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName+"-standard")
- }
- if rStandard.ContentType != "application/octet-stream" {
- t.Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
- "application/octet-stream", rStandard.ContentType)
- }
-
- rOctet, err := c.StatObject(bucketName, objectName+"-Octet")
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName+"-Octet")
- }
- if rOctet.ContentType != "application/octet-stream" {
- t.Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
- "application/octet-stream", rStandard.ContentType)
- }
-
- rGTar, err := c.StatObject(bucketName, objectName+"-GTar")
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName+"-GTar")
- }
- if rGTar.ContentType != "application/x-gtar" {
- t.Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
- "application/x-gtar", rStandard.ContentType)
- }
-
- // Remove all objects and bucket and temp file
- err = c.RemoveObject(bucketName, objectName+"-standard")
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- err = c.RemoveObject(bucketName, objectName+"-Octet")
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- err = c.RemoveObject(bucketName, objectName+"-GTar")
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- err = os.Remove(fileName + ".gtar")
- if err != nil {
- t.Fatal("Error:", err)
- }
-
-}
-
-// Tests various bucket supported formats.
-func TestMakeBucketRegionsV2(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for short runs")
- }
- if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
- t.Skip("skipping region functional tests for non s3 runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := NewV2(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket in 'eu-central-1'.
- if err = c.MakeBucket(bucketName, "eu-west-1"); err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- if err = c.RemoveBucket(bucketName); err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Make a new bucket with '.' in its name, in 'us-west-2'. This
- // request is internally staged into a path style instead of
- // virtual host style.
- if err = c.MakeBucket(bucketName+".withperiod", "us-west-2"); err != nil {
- t.Fatal("Error:", err, bucketName+".withperiod")
- }
-
- // Remove the newly created bucket.
- if err = c.RemoveBucket(bucketName + ".withperiod"); err != nil {
- t.Fatal("Error:", err, bucketName+".withperiod")
- }
-}
-
-// Tests get object ReaderSeeker interface methods.
-func TestGetObjectReadSeekFunctionalV2(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := NewV2(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Generate data more than 32K.
- buf := bytes.Repeat([]byte("2"), rand.Intn(1<<20)+32*1024)
-
- // Save the data.
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
-
- if n != int64(len(buf)) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
- }
-
- // Read the data back
- r, err := c.GetObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
-
- st, err := r.Stat()
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
- if st.Size != int64(len(buf)) {
- t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
- len(buf), st.Size)
- }
-
- offset := int64(2048)
- n, err = r.Seek(offset, 0)
- if err != nil {
- t.Fatal("Error:", err, offset)
- }
- if n != offset {
- t.Fatalf("Error: number of bytes seeked does not match, want %v, got %v\n",
- offset, n)
- }
- n, err = r.Seek(0, 1)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != offset {
- t.Fatalf("Error: number of current seek does not match, want %v, got %v\n",
- offset, n)
- }
- _, err = r.Seek(offset, 2)
- if err == nil {
- t.Fatal("Error: seek on positive offset for whence '2' should error out")
- }
- n, err = r.Seek(-offset, 2)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != st.Size-offset {
- t.Fatalf("Error: number of bytes seeked back does not match, want %d, got %v\n", st.Size-offset, n)
- }
-
- var buffer1 bytes.Buffer
- if _, err = io.CopyN(&buffer1, r, st.Size); err != nil {
- if err != io.EOF {
- t.Fatal("Error:", err)
- }
- }
- if !bytes.Equal(buf[len(buf)-int(offset):], buffer1.Bytes()) {
- t.Fatal("Error: Incorrect read bytes v/s original buffer.")
- }
-
- // Seek again and read again.
- n, err = r.Seek(offset-1, 0)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != (offset - 1) {
- t.Fatalf("Error: number of bytes seeked back does not match, want %v, got %v\n", offset-1, n)
- }
-
- var buffer2 bytes.Buffer
- if _, err = io.CopyN(&buffer2, r, st.Size); err != nil {
- if err != io.EOF {
- t.Fatal("Error:", err)
- }
- }
- // Verify now lesser bytes.
- if !bytes.Equal(buf[2047:], buffer2.Bytes()) {
- t.Fatal("Error: Incorrect read bytes v/s original buffer.")
- }
-
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-}
-
-// Tests get object ReaderAt interface methods.
-func TestGetObjectReadAtFunctionalV2(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for the short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := NewV2(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Generate data more than 32K
- buf := bytes.Repeat([]byte("8"), rand.Intn(1<<20)+32*1024)
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
-
- if n != int64(len(buf)) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
- }
-
- // Read the data back
- r, err := c.GetObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
-
- st, err := r.Stat()
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
- if st.Size != int64(len(buf)) {
- t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
- len(buf), st.Size)
- }
-
- offset := int64(2048)
-
- // Read directly
- buf2 := make([]byte, 512)
- buf3 := make([]byte, 512)
- buf4 := make([]byte, 512)
-
- m, err := r.ReadAt(buf2, offset)
- if err != nil {
- t.Fatal("Error:", err, st.Size, len(buf2), offset)
- }
- if m != len(buf2) {
- t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf2))
- }
- if !bytes.Equal(buf2, buf[offset:offset+512]) {
- t.Fatal("Error: Incorrect read between two ReadAt from same offset.")
- }
- offset += 512
- m, err = r.ReadAt(buf3, offset)
- if err != nil {
- t.Fatal("Error:", err, st.Size, len(buf3), offset)
- }
- if m != len(buf3) {
- t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf3))
- }
- if !bytes.Equal(buf3, buf[offset:offset+512]) {
- t.Fatal("Error: Incorrect read between two ReadAt from same offset.")
- }
- offset += 512
- m, err = r.ReadAt(buf4, offset)
- if err != nil {
- t.Fatal("Error:", err, st.Size, len(buf4), offset)
- }
- if m != len(buf4) {
- t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf4))
- }
- if !bytes.Equal(buf4, buf[offset:offset+512]) {
- t.Fatal("Error: Incorrect read between two ReadAt from same offset.")
- }
-
- buf5 := make([]byte, n)
- // Read the whole object.
- m, err = r.ReadAt(buf5, 0)
- if err != nil {
- if err != io.EOF {
- t.Fatal("Error:", err, len(buf5))
- }
- }
- if m != len(buf5) {
- t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf5))
- }
- if !bytes.Equal(buf, buf5) {
- t.Fatal("Error: Incorrect data read in GetObject, than what was previously upoaded.")
- }
-
- buf6 := make([]byte, n+1)
- // Read the whole object and beyond.
- _, err = r.ReadAt(buf6, 0)
- if err != nil {
- if err != io.EOF {
- t.Fatal("Error:", err, len(buf6))
- }
- }
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-}
-
-// Tests copy object
-func TestCopyObjectV2(t *testing.T) {
- if testing.Short() {
- t.Skip("Skipping functional tests for short runs")
- }
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object
- c, err := NewV2(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket in 'us-east-1' (source bucket).
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Make a new bucket in 'us-east-1' (destination bucket).
- err = c.MakeBucket(bucketName+"-copy", "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName+"-copy")
- }
-
- // Generate data more than 32K
- buf := bytes.Repeat([]byte("9"), rand.Intn(1<<20)+32*1024)
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
-
- if n != int64(len(buf)) {
- t.Fatalf("Error: number of bytes does not match want %v, got %v",
- len(buf), n)
- }
-
- dst, err := NewDestinationInfo(bucketName+"-copy", objectName+"-copy", nil, nil)
- if err != nil {
- t.Fatal(err)
- }
-
- src := NewSourceInfo(bucketName, objectName, nil)
- err = src.SetModifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Perform the Copy
- err = c.CopyObject(dst, src)
- if err != nil {
- t.Fatal("Error:", err, bucketName+"-copy", objectName+"-copy")
- }
-
- // Source object
- reader, err := c.GetObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error:", err)
- }
- // Destination object
- readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy")
- if err != nil {
- t.Fatal("Error:", err)
- }
- // Check the various fields of source object against destination object.
- objInfo, err := reader.Stat()
- if err != nil {
- t.Fatal("Error:", err)
- }
- objInfoCopy, err := readerCopy.Stat()
- if err != nil {
- t.Fatal("Error:", err)
- }
- if objInfo.Size != objInfoCopy.Size {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n",
- objInfo.Size, objInfoCopy.Size)
- }
-
- // Remove all objects and buckets
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- err = c.RemoveObject(bucketName+"-copy", objectName+"-copy")
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- err = c.RemoveBucket(bucketName + "-copy")
- if err != nil {
- t.Fatal("Error:", err)
- }
-}
-
-// Tests comprehensive list of all methods.
-func TestFunctionalV2(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for the short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- c, err := NewV2(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable to debug
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Generate a random file name.
- fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- file, err := os.Create(fileName)
- if err != nil {
- t.Fatal("Error:", err)
- }
- for i := 0; i < 3; i++ {
- buf := make([]byte, rand.Intn(1<<19))
- _, err = file.Write(buf)
- if err != nil {
- t.Fatal("Error:", err)
- }
- }
- file.Close()
-
- // Verify if bucket exits and you have access.
- var exists bool
- exists, err = c.BucketExists(bucketName)
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
- if !exists {
- t.Fatal("Error: could not find ", bucketName)
- }
-
- // Make the bucket 'public read/write'.
- err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadWrite)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // List all buckets.
- buckets, err := c.ListBuckets()
- if len(buckets) == 0 {
- t.Fatal("Error: list buckets cannot be empty", buckets)
- }
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Verify if previously created bucket is listed in list buckets.
- bucketFound := false
- for _, bucket := range buckets {
- if bucket.Name == bucketName {
- bucketFound = true
- }
- }
-
- // If bucket not found error out.
- if !bucketFound {
- t.Fatal("Error: bucket ", bucketName, "not found")
- }
-
- objectName := bucketName + "unique"
-
- // Generate data
- buf := bytes.Repeat([]byte("n"), rand.Intn(1<<19))
-
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "")
- if err != nil {
- t.Fatal("Error: ", err)
- }
- if n != int64(len(buf)) {
- t.Fatal("Error: bad length ", n, len(buf))
- }
-
- n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), "binary/octet-stream")
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName+"-nolength")
- }
-
- if n != int64(len(buf)) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
- }
-
- // Instantiate a done channel to close all listing.
- doneCh := make(chan struct{})
- defer close(doneCh)
-
- objFound := false
- isRecursive := true // Recursive is true.
- for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) {
- if obj.Key == objectName {
- objFound = true
- break
- }
- }
- if !objFound {
- t.Fatal("Error: object " + objectName + " not found.")
- }
-
- objFound = false
- isRecursive = true // Recursive is true.
- for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) {
- if obj.Key == objectName {
- objFound = true
- break
- }
- }
- if !objFound {
- t.Fatal("Error: object " + objectName + " not found.")
- }
-
- incompObjNotFound := true
- for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) {
- if objIncompl.Key != "" {
- incompObjNotFound = false
- break
- }
- }
- if !incompObjNotFound {
- t.Fatal("Error: unexpected dangling incomplete upload found.")
- }
-
- newReader, err := c.GetObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- newReadBytes, err := ioutil.ReadAll(newReader)
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- if !bytes.Equal(newReadBytes, buf) {
- t.Fatal("Error: bytes mismatch.")
- }
-
- err = c.FGetObject(bucketName, objectName, fileName+"-f")
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- // Generate presigned GET object url.
- presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- // Verify if presigned url works.
- resp, err := http.Get(presignedGetURL.String())
- if err != nil {
- t.Fatal("Error: ", err)
- }
- if resp.StatusCode != http.StatusOK {
- t.Fatal("Error: ", resp.Status)
- }
- newPresignedBytes, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- if !bytes.Equal(newPresignedBytes, buf) {
- t.Fatal("Error: bytes mismatch.")
- }
-
- // Set request parameters.
- reqParams := make(url.Values)
- reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
- // Generate presigned GET object url.
- presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- // Verify if presigned url works.
- resp, err = http.Get(presignedGetURL.String())
- if err != nil {
- t.Fatal("Error: ", err)
- }
- if resp.StatusCode != http.StatusOK {
- t.Fatal("Error: ", resp.Status)
- }
- newPresignedBytes, err = ioutil.ReadAll(resp.Body)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- if !bytes.Equal(newPresignedBytes, buf) {
- t.Fatal("Error: bytes mismatch for presigned GET url.")
- }
- // Verify content disposition.
- if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" {
- t.Fatalf("Error: wrong Content-Disposition received %s", resp.Header.Get("Content-Disposition"))
- }
-
- presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- // Generate data more than 32K
- buf = bytes.Repeat([]byte("1"), rand.Intn(1<<20)+32*1024)
-
- req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf))
- if err != nil {
- t.Fatal("Error: ", err)
- }
- httpClient := &http.Client{
- // Setting a sensible time out of 30secs to wait for response
- // headers. Request is pro-actively cancelled after 30secs
- // with no response.
- Timeout: 30 * time.Second,
- Transport: http.DefaultTransport,
- }
- resp, err = httpClient.Do(req)
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- newReader, err = c.GetObject(bucketName, objectName+"-presigned")
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- newReadBytes, err = ioutil.ReadAll(newReader)
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- if !bytes.Equal(newReadBytes, buf) {
- t.Fatal("Error: bytes mismatch.")
- }
-
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- err = c.RemoveObject(bucketName, objectName+"-f")
- if err != nil {
- t.Fatal("Error: ", err)
- }
- err = c.RemoveObject(bucketName, objectName+"-nolength")
- if err != nil {
- t.Fatal("Error: ", err)
- }
- err = c.RemoveObject(bucketName, objectName+"-presigned")
- if err != nil {
- t.Fatal("Error: ", err)
- }
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
- err = c.RemoveBucket(bucketName)
- if err == nil {
- t.Fatal("Error:")
- }
- if err.Error() != "The specified bucket does not exist" {
- t.Fatal("Error: ", err)
- }
- if err = os.Remove(fileName); err != nil {
- t.Fatal("Error: ", err)
- }
- if err = os.Remove(fileName + "-f"); err != nil {
- t.Fatal("Error: ", err)
- }
-}
-
-func testComposeObjectErrorCases(c *Client, t *testing.T) {
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket in 'us-east-1' (source bucket).
- err := c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Test that more than 10K source objects cannot be
- // concatenated.
- srcArr := [10001]SourceInfo{}
- srcSlice := srcArr[:]
- dst, err := NewDestinationInfo(bucketName, "object", nil, nil)
- if err != nil {
- t.Fatal(err)
- }
-
- if err := c.ComposeObject(dst, srcSlice); err == nil {
- t.Fatal("Error was expected.")
- } else if err.Error() != "There must be as least one and upto 10000 source objects." {
- t.Fatal("Got unexpected error: ", err)
- }
-
- // Create a source with invalid offset spec and check that
- // error is returned:
- // 1. Create the source object.
- const badSrcSize = 5 * 1024 * 1024
- buf := bytes.Repeat([]byte("1"), badSrcSize)
- _, err = c.PutObject(bucketName, "badObject", bytes.NewReader(buf), "")
- if err != nil {
- t.Fatal("Error:", err)
- }
- // 2. Set invalid range spec on the object (going beyond
- // object size)
- badSrc := NewSourceInfo(bucketName, "badObject", nil)
- err = badSrc.SetRange(1, badSrcSize)
- if err != nil {
- t.Fatal("Error:", err)
- }
- // 3. ComposeObject call should fail.
- if err := c.ComposeObject(dst, []SourceInfo{badSrc}); err == nil {
- t.Fatal("Error was expected.")
- } else if !strings.Contains(err.Error(), "has invalid segment-to-copy") {
- t.Fatal("Got unexpected error: ", err)
- }
-}
-
-// Test expected error cases
-func TestComposeObjectErrorCasesV2(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for the short runs")
- }
-
- // Instantiate new minio client object
- c, err := NewV2(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- testComposeObjectErrorCases(c, t)
-}
-
-func testComposeMultipleSources(c *Client, t *testing.T) {
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
- // Make a new bucket in 'us-east-1' (source bucket).
- err := c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Upload a small source object
- const srcSize = 1024 * 1024 * 5
- buf := bytes.Repeat([]byte("1"), srcSize)
- _, err = c.PutObject(bucketName, "srcObject", bytes.NewReader(buf), "binary/octet-stream")
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // We will append 10 copies of the object.
- srcs := []SourceInfo{}
- for i := 0; i < 10; i++ {
- srcs = append(srcs, NewSourceInfo(bucketName, "srcObject", nil))
- }
- // make the last part very small
- err = srcs[9].SetRange(0, 0)
- if err != nil {
- t.Fatal("unexpected error:", err)
- }
-
- dst, err := NewDestinationInfo(bucketName, "dstObject", nil, nil)
- if err != nil {
- t.Fatal(err)
- }
- err = c.ComposeObject(dst, srcs)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- objProps, err := c.StatObject(bucketName, "dstObject")
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- if objProps.Size != 9*srcSize+1 {
- t.Fatal("Size mismatched! Expected:", 10000*srcSize, "but got:", objProps.Size)
- }
-}
-
-// Test concatenating multiple objects objects
-func TestCompose10KSourcesV2(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for the short runs")
- }
-
- // Instantiate new minio client object
- c, err := NewV2(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- testComposeMultipleSources(c, t)
-}
-
-func testEncryptedCopyObject(c *Client, t *testing.T) {
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
- // Make a new bucket in 'us-east-1' (source bucket).
- err := c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- key1 := NewSSEInfo([]byte("32byteslongsecretkeymustbegiven1"), "AES256")
- key2 := NewSSEInfo([]byte("32byteslongsecretkeymustbegiven2"), "AES256")
-
- // 1. create an sse-c encrypted object to copy by uploading
- const srcSize = 1024 * 1024
- buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB
- metadata := make(map[string][]string)
- for k, v := range key1.GetSSEHeaders() {
- metadata[k] = append(metadata[k], v)
- }
- _, err = c.PutObjectWithSize(bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), metadata, nil)
- if err != nil {
- t.Fatal("PutObjectWithSize Error:", err)
- }
-
- // 2. copy object and change encryption key
- src := NewSourceInfo(bucketName, "srcObject", &key1)
- dst, err := NewDestinationInfo(bucketName, "dstObject", &key2, nil)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- err = c.CopyObject(dst, src)
- if err != nil {
- t.Fatal("CopyObject Error:", err)
- }
-
- // 3. get copied object and check if content is equal
- reqH := NewGetReqHeaders()
- for k, v := range key2.GetSSEHeaders() {
- reqH.Set(k, v)
- }
- coreClient := Core{c}
- reader, _, err := coreClient.GetObject(bucketName, "dstObject", reqH)
- if err != nil {
- t.Fatal("GetObject Error:", err)
- }
- defer reader.Close()
-
- decBytes, err := ioutil.ReadAll(reader)
- if err != nil {
- log.Fatalln(err)
- }
- if !bytes.Equal(decBytes, buf) {
- log.Fatal("downloaded object mismatched for encrypted object")
- }
-}
-
-// Test encrypted copy object
-func TestEncryptedCopyObjectV2(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for the short runs")
- }
-
- // Instantiate new minio client object
- c, err := NewV2(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- testEncryptedCopyObject(c, t)
-}
-
-func testUserMetadataCopying(c *Client, t *testing.T) {
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
- // Make a new bucket in 'us-east-1' (source bucket).
- err := c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- fetchMeta := func(object string) (h http.Header) {
- objInfo, err := c.StatObject(bucketName, object)
- if err != nil {
- t.Fatal("Metadata fetch error:", err)
- }
- h = make(http.Header)
- for k, vs := range objInfo.Metadata {
- if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") {
- for _, v := range vs {
- h.Add(k, v)
- }
- }
- }
- return h
- }
-
- // 1. create a client encrypted object to copy by uploading
- const srcSize = 1024 * 1024
- buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB
- metadata := make(http.Header)
- metadata.Set("x-amz-meta-myheader", "myvalue")
- _, err = c.PutObjectWithMetadata(bucketName, "srcObject",
- bytes.NewReader(buf), metadata, nil)
- if err != nil {
- t.Fatal("Put Error:", err)
- }
- if !reflect.DeepEqual(metadata, fetchMeta("srcObject")) {
- t.Fatal("Unequal metadata")
- }
-
- // 2. create source
- src := NewSourceInfo(bucketName, "srcObject", nil)
- // 2.1 create destination with metadata set
- dst1, err := NewDestinationInfo(bucketName, "dstObject-1", nil, map[string]string{"notmyheader": "notmyvalue"})
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // 3. Check that copying to an object with metadata set resets
- // the headers on the copy.
- err = c.CopyObject(dst1, src)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- expectedHeaders := make(http.Header)
- expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue")
- if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-1")) {
- t.Fatal("Unequal metadata")
- }
-
- // 4. create destination with no metadata set and same source
- dst2, err := NewDestinationInfo(bucketName, "dstObject-2", nil, nil)
- if err != nil {
- t.Fatal("Error:", err)
-
- }
- src = NewSourceInfo(bucketName, "srcObject", nil)
-
- // 5. Check that copying to an object with no metadata set,
- // copies metadata.
- err = c.CopyObject(dst2, src)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- expectedHeaders = metadata
- if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-2")) {
- t.Fatal("Unequal metadata")
- }
-
- // 6. Compose a pair of sources.
- srcs := []SourceInfo{
- NewSourceInfo(bucketName, "srcObject", nil),
- NewSourceInfo(bucketName, "srcObject", nil),
- }
- dst3, err := NewDestinationInfo(bucketName, "dstObject-3", nil, nil)
- if err != nil {
- t.Fatal("Error:", err)
-
- }
-
- err = c.ComposeObject(dst3, srcs)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Check that no headers are copied in this case
- if !reflect.DeepEqual(make(http.Header), fetchMeta("dstObject-3")) {
- t.Fatal("Unequal metadata")
- }
-
- // 7. Compose a pair of sources with dest user metadata set.
- srcs = []SourceInfo{
- NewSourceInfo(bucketName, "srcObject", nil),
- NewSourceInfo(bucketName, "srcObject", nil),
- }
- dst4, err := NewDestinationInfo(bucketName, "dstObject-4", nil, map[string]string{"notmyheader": "notmyvalue"})
- if err != nil {
- t.Fatal("Error:", err)
-
- }
-
- err = c.ComposeObject(dst4, srcs)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Check that no headers are copied in this case
- expectedHeaders = make(http.Header)
- expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue")
- if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-4")) {
- t.Fatal("Unequal metadata")
- }
-}
-
-func TestUserMetadataCopyingV2(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for the short runs")
- }
-
- // Instantiate new minio client object
- c, err := NewV2(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // c.TraceOn(os.Stderr)
- testUserMetadataCopying(c, t)
-}
diff --git a/api_functional_v4_test.go b/api_functional_v4_test.go
deleted file mode 100644
index e9593dd..0000000
--- a/api_functional_v4_test.go
+++ /dev/null
@@ -1,2410 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "bytes"
- "encoding/hex"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "math/rand"
- "net/http"
- "net/url"
- "os"
- "strconv"
- "strings"
- "testing"
- "time"
-
- "github.com/minio/minio-go/pkg/encrypt"
- "github.com/minio/minio-go/pkg/policy"
-)
-
-const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"
-const (
- letterIdxBits = 6 // 6 bits to represent a letter index
- letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
- letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits
-)
-
-// randString generates random names and prepends them with a known prefix.
-func randString(n int, src rand.Source, prefix string) string {
- b := make([]byte, n)
- // A rand.Int63() generates 63 random bits, enough for letterIdxMax letters!
- for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
- if remain == 0 {
- cache, remain = src.Int63(), letterIdxMax
- }
- if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
- b[i] = letterBytes[idx]
- i--
- }
- cache >>= letterIdxBits
- remain--
- }
- return prefix + string(b[0:30-len(prefix)])
-}
-
-// Tests bucket re-create errors.
-func TestMakeBucketError(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for short runs")
- }
- if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
- t.Skip("skipping region functional tests for non s3 runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket in 'eu-central-1'.
- if err = c.MakeBucket(bucketName, "eu-central-1"); err != nil {
- t.Fatal("Error:", err, bucketName)
- }
- if err = c.MakeBucket(bucketName, "eu-central-1"); err == nil {
- t.Fatal("Error: make bucket should should fail for", bucketName)
- }
- // Verify valid error response from server.
- if ToErrorResponse(err).Code != "BucketAlreadyExists" &&
- ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" {
- t.Fatal("Error: Invalid error returned by server", err)
- }
- if err = c.RemoveBucket(bucketName); err != nil {
- t.Fatal("Error:", err, bucketName)
- }
- if err = c.MakeBucket(bucketName+"..-1", "eu-central-1"); err == nil {
- t.Fatal("Error:", err, bucketName+"..-1")
- }
- // Verify valid error response.
- if err != nil && err.Error() != "Bucket name contains invalid characters" {
- t.Fatal("Error: Invalid error returned by server", err)
- }
- if err = c.MakeBucket(bucketName+"AAA-1", "eu-central-1"); err == nil {
- t.Fatal("Error:", err, bucketName+"..-1")
- }
- // Verify valid error response.
- if err != nil && err.Error() != "Bucket name contains invalid characters" {
- t.Fatal("Error: Invalid error returned by server", err)
- }
-}
-
-// Tests various bucket supported formats.
-func TestMakeBucketRegions(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for short runs")
- }
- if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
- t.Skip("skipping region functional tests for non s3 runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket in 'eu-central-1'.
- if err = c.MakeBucket(bucketName, "eu-central-1"); err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- if err = c.RemoveBucket(bucketName); err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Make a new bucket with '.' in its name, in 'us-west-2'. This
- // request is internally staged into a path style instead of
- // virtual host style.
- if err = c.MakeBucket(bucketName+".withperiod", "us-west-2"); err != nil {
- t.Fatal("Error:", err, bucketName+".withperiod")
- }
-
- // Remove the newly created bucket.
- if err = c.RemoveBucket(bucketName + ".withperiod"); err != nil {
- t.Fatal("Error:", err, bucketName+".withperiod")
- }
-}
-
-// Test PutObject using a large data to trigger multipart readat
-func TestPutObjectReadAt(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Generate data using 4 parts so that all 3 'workers' are utilized and a part is leftover.
- // Use different data for each part for multipart tests to ensure part order at the end.
- var buf []byte
- for i := 0; i < 4; i++ {
- buf = append(buf, bytes.Repeat([]byte(string('a'+i)), minPartSize)...)
- }
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- // Object content type
- objectContentType := "binary/octet-stream"
-
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), objectContentType)
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
-
- if n != int64(len(buf)) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
- }
-
- // Read the data back
- r, err := c.GetObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
-
- st, err := r.Stat()
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
- if st.Size != int64(len(buf)) {
- t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
- len(buf), st.Size)
- }
- if st.ContentType != objectContentType {
- t.Fatalf("Error: Content types don't match, expected: %+v, found: %+v\n", objectContentType, st.ContentType)
- }
- if err := r.Close(); err != nil {
- t.Fatal("Error:", err)
- }
- if err := r.Close(); err == nil {
- t.Fatal("Error: object is already closed, should return error")
- }
-
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-}
-
-// Test PutObject using a large data to trigger multipart readat
-func TestPutObjectWithMetadata(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Generate data using 2 parts
- // Use different data in each part for multipart tests to ensure part order at the end.
- var buf []byte
- for i := 0; i < 2; i++ {
- buf = append(buf, bytes.Repeat([]byte(string('a'+i)), minPartSize)...)
- }
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-
- // Object custom metadata
- customContentType := "custom/contenttype"
-
- n, err := c.PutObjectWithMetadata(bucketName, objectName, bytes.NewReader(buf), map[string][]string{
- "Content-Type": {customContentType},
- }, nil)
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
-
- if n != int64(len(buf)) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
- }
-
- // Read the data back
- r, err := c.GetObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
-
- st, err := r.Stat()
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
- if st.Size != int64(len(buf)) {
- t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
- len(buf), st.Size)
- }
- if st.ContentType != customContentType {
- t.Fatalf("Error: Expected and found content types do not match, want %v, got %v\n",
- customContentType, st.ContentType)
- }
- if err := r.Close(); err != nil {
- t.Fatal("Error:", err)
- }
- if err := r.Close(); err == nil {
- t.Fatal("Error: object is already closed, should return error")
- }
-
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-}
-
-// Test put object with streaming signature.
-func TestPutObjectStreaming(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping function tests for short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()),
- "minio-go-test")
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Upload an object.
- sizes := []int64{0, 64*1024 - 1, 64 * 1024}
- objectName := "test-object"
- for i, size := range sizes {
- data := bytes.Repeat([]byte("a"), int(size))
- n, err := c.PutObjectStreaming(bucketName, objectName, bytes.NewReader(data))
- if err != nil {
- t.Fatalf("Test %d Error: %v %s %s", i+1, err, bucketName, objectName)
- }
-
- if n != size {
- t.Errorf("Test %d Expected upload object size %d but got %d", i+1, size, n)
- }
- }
-
- // Remove the object.
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Remove the bucket.
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-}
-
-// Test listing no partially uploaded objects upon putObject error.
-func TestListNoPartiallyUploadedObjects(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping function tests for short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Enable tracing, write to stdout.
- // c.TraceOn(os.Stderr)
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- r := bytes.NewReader(bytes.Repeat([]byte("0"), minPartSize*2))
-
- reader, writer := io.Pipe()
- go func() {
- i := 0
- for i < 25 {
- _, cerr := io.CopyN(writer, r, (minPartSize*2)/25)
- if cerr != nil {
- t.Fatal("Error:", cerr, bucketName)
- }
- i++
- r.Seek(0, 0)
- }
- writer.CloseWithError(errors.New("proactively closed to be verified later"))
- }()
-
- objectName := bucketName + "-resumable"
- _, err = c.PutObject(bucketName, objectName, reader, "application/octet-stream")
- if err == nil {
- t.Fatal("Error: PutObject should fail.")
- }
- if !strings.Contains(err.Error(), "proactively closed to be verified later") {
- t.Fatal("Error:", err)
- }
-
- doneCh := make(chan struct{})
- defer close(doneCh)
-
- isRecursive := true
- multiPartObjectCh := c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)
-
- var activeUploads bool
- for multiPartObject := range multiPartObjectCh {
- if multiPartObject.Err != nil {
- t.Fatalf("Error: Error when listing incomplete upload")
- }
- activeUploads = true
- }
- if activeUploads {
- t.Errorf("There should be no active uploads in progress upon error for %s/%s", bucketName, objectName)
- }
-
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-}
-
-// Test get object seeker from the end, using whence set to '2'.
-func TestGetOjectSeekEnd(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Generate data more than 32K
- buf := bytes.Repeat([]byte("1"), rand.Intn(1<<20)+32*1024)
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
-
- if n != int64(len(buf)) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
- }
-
- // Read the data back
- r, err := c.GetObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
-
- st, err := r.Stat()
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
- if st.Size != int64(len(buf)) {
- t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
- len(buf), st.Size)
- }
-
- pos, err := r.Seek(-100, 2)
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
- if pos != st.Size-100 {
- t.Fatalf("Expected %d, got %d instead", pos, st.Size-100)
- }
- buf2 := make([]byte, 100)
- m, err := io.ReadFull(r, buf2)
- if err != nil {
- t.Fatal("Error: reading through io.ReadFull", err, bucketName, objectName)
- }
- if m != len(buf2) {
- t.Fatalf("Expected %d bytes, got %d", len(buf2), m)
- }
- hexBuf1 := fmt.Sprintf("%02x", buf[len(buf)-100:])
- hexBuf2 := fmt.Sprintf("%02x", buf2[:m])
- if hexBuf1 != hexBuf2 {
- t.Fatalf("Expected %s, got %s instead", hexBuf1, hexBuf2)
- }
- pos, err = r.Seek(-100, 2)
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
- if pos != st.Size-100 {
- t.Fatalf("Expected %d, got %d instead", pos, st.Size-100)
- }
- if err = r.Close(); err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
-}
-
-// Test get object reader to not throw error on being closed twice.
-func TestGetObjectClosedTwice(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Generate data more than 32K
- buf := bytes.Repeat([]byte("1"), rand.Intn(1<<20)+32*1024)
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
-
- if n != int64(len(buf)) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
- }
-
- // Read the data back
- r, err := c.GetObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
-
- st, err := r.Stat()
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
- if st.Size != int64(len(buf)) {
- t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
- len(buf), st.Size)
- }
- if err := r.Close(); err != nil {
- t.Fatal("Error:", err)
- }
- if err := r.Close(); err == nil {
- t.Fatal("Error: object is already closed, should return error")
- }
-
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-}
-
-// Test removing multiple objects with Remove API
-func TestRemoveMultipleObjects(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping function tests for short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
-
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Enable tracing, write to stdout.
- // c.TraceOn(os.Stderr)
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- r := bytes.NewReader(bytes.Repeat([]byte("a"), 8))
-
- // Multi remove of 1100 objects
- nrObjects := 1100
-
- objectsCh := make(chan string)
-
- go func() {
- defer close(objectsCh)
- // Upload objects and send them to objectsCh
- for i := 0; i < nrObjects; i++ {
- objectName := "sample" + strconv.Itoa(i) + ".txt"
- _, err = c.PutObject(bucketName, objectName, r, "application/octet-stream")
- if err != nil {
- t.Error("Error: PutObject shouldn't fail.", err)
- continue
- }
- objectsCh <- objectName
- }
- }()
-
- // Call RemoveObjects API
- errorCh := c.RemoveObjects(bucketName, objectsCh)
-
- // Check if errorCh doesn't receive any error
- select {
- case r, more := <-errorCh:
- if more {
- t.Fatalf("Unexpected error, objName(%v) err(%v)", r.ObjectName, r.Err)
- }
- }
-
- // Clean the bucket created by the test
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-}
-
-// Tests FPutObject of a big file to trigger multipart
-func TestFPutObjectMultipart(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Make a temp file with minPartSize*4 bytes of data.
- file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload.
- var buffer []byte
- for i := 0; i < 4; i++ {
- buffer = append(buffer, bytes.Repeat([]byte(string('a'+i)), minPartSize)...)
- }
-
- size, err := file.Write(buffer)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if size != minPartSize*4 {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*4, size)
- }
-
- // Close the file pro-actively for windows.
- err = file.Close()
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Set base object name
- objectName := bucketName + "FPutObject"
- objectContentType := "testapplication/octet-stream"
-
- // Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
- n, err := c.FPutObject(bucketName, objectName+"-standard", file.Name(), objectContentType)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != int64(minPartSize*4) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*4, n)
- }
-
- r, err := c.GetObject(bucketName, objectName+"-standard")
- if err != nil {
- t.Fatalf("Unexpected error: %v\n", err)
- }
- objInfo, err := r.Stat()
- if err != nil {
- t.Fatalf("Unexpected error: %v\n", err)
- }
- if objInfo.Size != minPartSize*4 {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*4, n)
- }
- if objInfo.ContentType != objectContentType {
- t.Fatalf("Error: Content types don't match, want %v, got %v\n", objectContentType, objInfo.ContentType)
- }
-
- // Remove all objects and bucket and temp file
- err = c.RemoveObject(bucketName, objectName+"-standard")
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-}
-
-// Tests FPutObject hidden contentType setting
-func TestFPutObject(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Make a temp file with minPartSize*4 bytes of data.
- file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Upload 4 parts worth of data to use all 3 of multiparts 'workers' and have an extra part.
- // Use different data in part for multipart tests to check parts are uploaded in correct order.
- var buffer []byte
- for i := 0; i < 4; i++ {
- buffer = append(buffer, bytes.Repeat([]byte(string('a'+i)), minPartSize)...)
- }
-
- // Write the data to the file.
- size, err := file.Write(buffer)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if size != minPartSize*4 {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*4, size)
- }
-
- // Close the file pro-actively for windows.
- err = file.Close()
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Set base object name
- objectName := bucketName + "FPutObject"
-
- // Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
- n, err := c.FPutObject(bucketName, objectName+"-standard", file.Name(), "application/octet-stream")
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != int64(minPartSize*4) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*4, n)
- }
-
- // Perform FPutObject with no contentType provided (Expecting application/octet-stream)
- n, err = c.FPutObject(bucketName, objectName+"-Octet", file.Name(), "")
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != int64(minPartSize*4) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*4, n)
- }
-
- // Add extension to temp file name
- fileName := file.Name()
- err = os.Rename(file.Name(), fileName+".gtar")
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Perform FPutObject with no contentType provided (Expecting application/x-gtar)
- n, err = c.FPutObject(bucketName, objectName+"-GTar", fileName+".gtar", "")
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != int64(minPartSize*4) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", minPartSize*4, n)
- }
-
- // Check headers
- rStandard, err := c.StatObject(bucketName, objectName+"-standard")
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName+"-standard")
- }
- if rStandard.ContentType != "application/octet-stream" {
- t.Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
- "application/octet-stream", rStandard.ContentType)
- }
-
- rOctet, err := c.StatObject(bucketName, objectName+"-Octet")
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName+"-Octet")
- }
- if rOctet.ContentType != "application/octet-stream" {
- t.Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
- "application/octet-stream", rStandard.ContentType)
- }
-
- rGTar, err := c.StatObject(bucketName, objectName+"-GTar")
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName+"-GTar")
- }
- if rGTar.ContentType != "application/x-gtar" {
- t.Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
- "application/x-gtar", rStandard.ContentType)
- }
-
- // Remove all objects and bucket and temp file
- err = c.RemoveObject(bucketName, objectName+"-standard")
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- err = c.RemoveObject(bucketName, objectName+"-Octet")
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- err = c.RemoveObject(bucketName, objectName+"-GTar")
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- err = os.Remove(fileName + ".gtar")
- if err != nil {
- t.Fatal("Error:", err)
- }
-
-}
-
-// Tests get object ReaderSeeker interface methods.
-func TestGetObjectReadSeekFunctional(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Generate data more than 32K
- buf := bytes.Repeat([]byte("2"), rand.Intn(1<<20)+32*1024)
- bufSize := len(buf)
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
-
- if n != int64(bufSize) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
- }
-
- defer func() {
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
- }()
-
- // Read the data back
- r, err := c.GetObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
-
- st, err := r.Stat()
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
- if st.Size != int64(bufSize) {
- t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
- len(buf), st.Size)
- }
-
- // This following function helps us to compare data from the reader after seek
- // with the data from the original buffer
- cmpData := func(r io.Reader, start, end int) {
- if end-start == 0 {
- return
- }
- buffer := bytes.NewBuffer([]byte{})
- if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil {
- if err != io.EOF {
- t.Fatal("Error:", err)
- }
- }
- if !bytes.Equal(buf[start:end], buffer.Bytes()) {
- t.Fatal("Error: Incorrect read bytes v/s original buffer.")
- }
- }
-
- // Generic seek error for errors other than io.EOF
- seekErr := errors.New("seek error")
-
- testCases := []struct {
- offset int64
- whence int
- pos int64
- err error
- shouldCmp bool
- start int
- end int
- }{
- // Start from offset 0, fetch data and compare
- {0, 0, 0, nil, true, 0, 0},
- // Start from offset 2048, fetch data and compare
- {2048, 0, 2048, nil, true, 2048, bufSize},
- // Start from offset larger than possible
- {int64(bufSize) + 1024, 0, 0, seekErr, false, 0, 0},
- // Move to offset 0 without comparing
- {0, 0, 0, nil, false, 0, 0},
- // Move one step forward and compare
- {1, 1, 1, nil, true, 1, bufSize},
- // Move larger than possible
- {int64(bufSize), 1, 0, seekErr, false, 0, 0},
- // Provide negative offset with CUR_SEEK
- {int64(-1), 1, 0, seekErr, false, 0, 0},
- // Test with whence SEEK_END and with positive offset
- {1024, 2, int64(bufSize) - 1024, io.EOF, true, 0, 0},
- // Test with whence SEEK_END and with negative offset
- {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize},
- // Test with whence SEEK_END and with large negative offset
- {-int64(bufSize) * 2, 2, 0, seekErr, true, 0, 0},
- }
-
- for i, testCase := range testCases {
- // Perform seek operation
- n, err := r.Seek(testCase.offset, testCase.whence)
- // We expect an error
- if testCase.err == seekErr && err == nil {
- t.Fatalf("Test %d, unexpected err value: expected: %v, found: %v", i+1, testCase.err, err)
- }
- // We expect a specific error
- if testCase.err != seekErr && testCase.err != err {
- t.Fatalf("Test %d, unexpected err value: expected: %v, found: %v", i+1, testCase.err, err)
- }
- // If we expect an error go to the next loop
- if testCase.err != nil {
- continue
- }
- // Check the returned seek pos
- if n != testCase.pos {
- t.Fatalf("Test %d, error: number of bytes seeked does not match, want %v, got %v\n", i+1,
- testCase.pos, n)
- }
- // Compare only if shouldCmp is activated
- if testCase.shouldCmp {
- cmpData(r, testCase.start, testCase.end)
- }
- }
-}
-
-// Tests get object ReaderAt interface methods.
-func TestGetObjectReadAtFunctional(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for the short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Generate data more than 32K
- buf := bytes.Repeat([]byte("3"), rand.Intn(1<<20)+32*1024)
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
-
- if n != int64(len(buf)) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
- }
-
- // read the data back
- r, err := c.GetObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
- offset := int64(2048)
-
- // read directly
- buf1 := make([]byte, 512)
- buf2 := make([]byte, 512)
- buf3 := make([]byte, 512)
- buf4 := make([]byte, 512)
-
- // Test readAt before stat is called.
- m, err := r.ReadAt(buf1, offset)
- if err != nil {
- t.Fatal("Error:", err, len(buf1), offset)
- }
- if m != len(buf1) {
- t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf1))
- }
- if !bytes.Equal(buf1, buf[offset:offset+512]) {
- t.Fatal("Error: Incorrect read between two ReadAt from same offset.")
- }
- offset += 512
-
- st, err := r.Stat()
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
- if st.Size != int64(len(buf)) {
- t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
- len(buf), st.Size)
- }
-
- m, err = r.ReadAt(buf2, offset)
- if err != nil {
- t.Fatal("Error:", err, st.Size, len(buf2), offset)
- }
- if m != len(buf2) {
- t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf2))
- }
- if !bytes.Equal(buf2, buf[offset:offset+512]) {
- t.Fatal("Error: Incorrect read between two ReadAt from same offset.")
- }
- offset += 512
- m, err = r.ReadAt(buf3, offset)
- if err != nil {
- t.Fatal("Error:", err, st.Size, len(buf3), offset)
- }
- if m != len(buf3) {
- t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf3))
- }
- if !bytes.Equal(buf3, buf[offset:offset+512]) {
- t.Fatal("Error: Incorrect read between two ReadAt from same offset.")
- }
- offset += 512
- m, err = r.ReadAt(buf4, offset)
- if err != nil {
- t.Fatal("Error:", err, st.Size, len(buf4), offset)
- }
- if m != len(buf4) {
- t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf4))
- }
- if !bytes.Equal(buf4, buf[offset:offset+512]) {
- t.Fatal("Error: Incorrect read between two ReadAt from same offset.")
- }
-
- buf5 := make([]byte, n)
- // Read the whole object.
- m, err = r.ReadAt(buf5, 0)
- if err != nil {
- if err != io.EOF {
- t.Fatal("Error:", err, len(buf5))
- }
- }
- if m != len(buf5) {
- t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf5))
- }
- if !bytes.Equal(buf, buf5) {
- t.Fatal("Error: Incorrect data read in GetObject, than what was previously upoaded.")
- }
-
- buf6 := make([]byte, n+1)
- // Read the whole object and beyond.
- _, err = r.ReadAt(buf6, 0)
- if err != nil {
- if err != io.EOF {
- t.Fatal("Error:", err, len(buf6))
- }
- }
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-}
-
-// Test Presigned Post Policy
-func TestPresignedPostPolicy(t *testing.T) {
- if testing.Short() {
- t.Skip("Skipping functional tests for short runs")
- }
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object
- c, err := NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket in 'us-east-1' (source bucket).
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Generate data more than 32K
- buf := bytes.Repeat([]byte("4"), rand.Intn(1<<20)+32*1024)
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
-
- if n != int64(len(buf)) {
- t.Fatalf("Error: number of bytes does not match want %v, got %v",
- len(buf), n)
- }
-
- policy := NewPostPolicy()
-
- if err := policy.SetBucket(""); err == nil {
- t.Fatalf("Error: %s", err)
- }
- if err := policy.SetKey(""); err == nil {
- t.Fatalf("Error: %s", err)
- }
- if err := policy.SetKeyStartsWith(""); err == nil {
- t.Fatalf("Error: %s", err)
- }
- if err := policy.SetExpires(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)); err == nil {
- t.Fatalf("Error: %s", err)
- }
- if err := policy.SetContentType(""); err == nil {
- t.Fatalf("Error: %s", err)
- }
- if err := policy.SetContentLengthRange(1024*1024, 1024); err == nil {
- t.Fatalf("Error: %s", err)
- }
-
- policy.SetBucket(bucketName)
- policy.SetKey(objectName)
- policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
- policy.SetContentType("image/png")
- policy.SetContentLengthRange(1024, 1024*1024)
-
- _, _, err = c.PresignedPostPolicy(policy)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- policy = NewPostPolicy()
-
- // Remove all objects and buckets
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-}
-
-// Tests copy object
-func TestCopyObject(t *testing.T) {
- if testing.Short() {
- t.Skip("Skipping functional tests for short runs")
- }
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object
- c, err := NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket in 'us-east-1' (source bucket).
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Make a new bucket in 'us-east-1' (destination bucket).
- err = c.MakeBucket(bucketName+"-copy", "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName+"-copy")
- }
-
- // Generate data more than 32K
- buf := bytes.Repeat([]byte("5"), rand.Intn(1<<20)+32*1024)
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
-
- if n != int64(len(buf)) {
- t.Fatalf("Error: number of bytes does not match want %v, got %v",
- len(buf), n)
- }
-
- r, err := c.GetObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error:", err)
- }
- // Check the various fields of source object against destination object.
- objInfo, err := r.Stat()
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Copy Source
- src := NewSourceInfo(bucketName, objectName, nil)
-
- // Set copy conditions.
-
- // All invalid conditions first.
- err = src.SetModifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
- if err == nil {
- t.Fatal("Error:", err)
- }
- err = src.SetUnmodifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
- if err == nil {
- t.Fatal("Error:", err)
- }
- err = src.SetMatchETagCond("")
- if err == nil {
- t.Fatal("Error:", err)
- }
- err = src.SetMatchETagExceptCond("")
- if err == nil {
- t.Fatal("Error:", err)
- }
-
- err = src.SetModifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
- if err != nil {
- t.Fatal("Error:", err)
- }
- err = src.SetMatchETagCond(objInfo.ETag)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- dst, err := NewDestinationInfo(bucketName+"-copy", objectName+"-copy", nil, nil)
- if err != nil {
- t.Fatal(err)
- }
-
- // Perform the Copy
- err = c.CopyObject(dst, src)
- if err != nil {
- t.Fatal("Error:", err, bucketName+"-copy", objectName+"-copy")
- }
-
- // Source object
- reader, err := c.GetObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error:", err)
- }
- // Destination object
- readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy")
- if err != nil {
- t.Fatal("Error:", err)
- }
- // Check the various fields of source object against destination object.
- objInfo, err = reader.Stat()
- if err != nil {
- t.Fatal("Error:", err)
- }
- objInfoCopy, err := readerCopy.Stat()
- if err != nil {
- t.Fatal("Error:", err)
- }
- if objInfo.Size != objInfoCopy.Size {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n",
- objInfo.Size, objInfoCopy.Size)
- }
-
- // CopyObject again but with wrong conditions
- src = NewSourceInfo(bucketName, objectName, nil)
- err = src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
- if err != nil {
- t.Fatal("Error:", err)
- }
- err = src.SetMatchETagExceptCond(objInfo.ETag)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Perform the Copy which should fail
- err = c.CopyObject(dst, src)
- if err == nil {
- t.Fatal("Error:", err, bucketName+"-copy", objectName+"-copy should fail")
- }
-
- // Remove all objects and buckets
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- err = c.RemoveObject(bucketName+"-copy", objectName+"-copy")
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- err = c.RemoveBucket(bucketName + "-copy")
- if err != nil {
- t.Fatal("Error:", err)
- }
-}
-
-// TestEncryptionPutGet tests client side encryption
-func TestEncryptionPutGet(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for the short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Generate a symmetric key
- symKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00"))
-
- // Generate an assymmetric key from predefine public and private certificates
- privateKey, err := hex.DecodeString(
- "30820277020100300d06092a864886f70d0101010500048202613082025d" +
- "0201000281810087b42ea73243a3576dc4c0b6fa245d339582dfdbddc20c" +
- "bb8ab666385034d997210c54ba79275c51162a1221c3fb1a4c7c61131ca6" +
- "5563b319d83474ef5e803fbfa7e52b889e1893b02586b724250de7ac6351" +
- "cc0b7c638c980acec0a07020a78eed7eaa471eca4b92071394e061346c06" +
- "15ccce2f465dee2080a89e43f29b5702030100010281801dd5770c3af8b3" +
- "c85cd18cacad81a11bde1acfac3eac92b00866e142301fee565365aa9af4" +
- "57baebf8bb7711054d071319a51dd6869aef3848ce477a0dc5f0dbc0c336" +
- "5814b24c820491ae2bb3c707229a654427e03307fec683e6b27856688f08" +
- "bdaa88054c5eeeb773793ff7543ee0fb0e2ad716856f2777f809ef7e6fa4" +
- "41024100ca6b1edf89e8a8f93cce4b98c76c6990a09eb0d32ad9d3d04fbf" +
- "0b026fa935c44f0a1c05dd96df192143b7bda8b110ec8ace28927181fd8c" +
- "d2f17330b9b63535024100aba0260afb41489451baaeba423bee39bcbd1e" +
- "f63dd44ee2d466d2453e683bf46d019a8baead3a2c7fca987988eb4d565e" +
- "27d6be34605953f5034e4faeec9bdb0241009db2cb00b8be8c36710aff96" +
- "6d77a6dec86419baca9d9e09a2b761ea69f7d82db2ae5b9aae4246599bb2" +
- "d849684d5ab40e8802cfe4a2b358ad56f2b939561d2902404e0ead9ecafd" +
- "bb33f22414fa13cbcc22a86bdf9c212ce1a01af894e3f76952f36d6c904c" +
- "bd6a7e0de52550c9ddf31f1e8bfe5495f79e66a25fca5c20b3af5b870241" +
- "0083456232aa58a8c45e5b110494599bda8dbe6a094683a0539ddd24e19d" +
- "47684263bbe285ad953d725942d670b8f290d50c0bca3d1dc9688569f1d5" +
- "9945cb5c7d")
-
- if err != nil {
- t.Fatal(err)
- }
-
- publicKey, err := hex.DecodeString("30819f300d06092a864886f70d010101050003818d003081890281810087" +
- "b42ea73243a3576dc4c0b6fa245d339582dfdbddc20cbb8ab666385034d9" +
- "97210c54ba79275c51162a1221c3fb1a4c7c61131ca65563b319d83474ef" +
- "5e803fbfa7e52b889e1893b02586b724250de7ac6351cc0b7c638c980ace" +
- "c0a07020a78eed7eaa471eca4b92071394e061346c0615ccce2f465dee20" +
- "80a89e43f29b570203010001")
- if err != nil {
- t.Fatal(err)
- }
-
- // Generate an asymmetric key
- asymKey, err := encrypt.NewAsymmetricKey(privateKey, publicKey)
- if err != nil {
- t.Fatal(err)
- }
-
- // Object custom metadata
- customContentType := "custom/contenttype"
-
- testCases := []struct {
- buf []byte
- encKey encrypt.Key
- }{
- {encKey: symKey, buf: bytes.Repeat([]byte("F"), 0)},
- {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1)},
- {encKey: symKey, buf: bytes.Repeat([]byte("F"), 15)},
- {encKey: symKey, buf: bytes.Repeat([]byte("F"), 16)},
- {encKey: symKey, buf: bytes.Repeat([]byte("F"), 17)},
- {encKey: symKey, buf: bytes.Repeat([]byte("F"), 31)},
- {encKey: symKey, buf: bytes.Repeat([]byte("F"), 32)},
- {encKey: symKey, buf: bytes.Repeat([]byte("F"), 33)},
- {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024)},
- {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*2)},
- {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*1024)},
-
- {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 0)},
- {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1)},
- {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 16)},
- {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 32)},
- {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024)},
- {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024*1024)},
- }
-
- for i, testCase := range testCases {
- // Generate a random object name
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
-
- // Secured object
- cbcMaterials, err := encrypt.NewCBCSecureMaterials(testCase.encKey)
- if err != nil {
- t.Fatal(err)
- }
-
- // Put encrypted data
- _, err = c.PutEncryptedObject(bucketName, objectName, bytes.NewReader(testCase.buf), cbcMaterials, map[string][]string{"Content-Type": {customContentType}}, nil)
- if err != nil {
- t.Fatalf("Test %d, error: %v %v %v", i+1, err, bucketName, objectName)
- }
-
- // Read the data back
- r, err := c.GetEncryptedObject(bucketName, objectName, cbcMaterials)
- if err != nil {
- t.Fatalf("Test %d, error: %v %v %v", i+1, err, bucketName, objectName)
- }
- defer r.Close()
-
- // Compare the sent object with the received one
- recvBuffer := bytes.NewBuffer([]byte{})
- if _, err = io.Copy(recvBuffer, r); err != nil {
- t.Fatalf("Test %d, error: %v", i+1, err)
- }
- if recvBuffer.Len() != len(testCase.buf) {
- t.Fatalf("Test %d, error: number of bytes of received object does not match, want %v, got %v\n",
- i+1, len(testCase.buf), recvBuffer.Len())
- }
- if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) {
- t.Fatalf("Test %d, error: Encrypted sent is not equal to decrypted, want `%x`, go `%x`", i+1, testCase.buf, recvBuffer.Bytes())
- }
-
- // Remove test object
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- t.Fatalf("Test %d, error: %v", i+1, err)
- }
-
- }
-
- // Remove test bucket
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
-}
-
-func TestBucketNotification(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for the short runs")
- }
- if os.Getenv("NOTIFY_BUCKET") == "" ||
- os.Getenv("NOTIFY_SERVICE") == "" ||
- os.Getenv("NOTIFY_REGION") == "" ||
- os.Getenv("NOTIFY_ACCOUNTID") == "" ||
- os.Getenv("NOTIFY_RESOURCE") == "" {
- t.Skip("skipping notification test if not configured")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- c, err := New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable to debug
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- bucketName := os.Getenv("NOTIFY_BUCKET")
-
- topicArn := NewArn("aws", os.Getenv("NOTIFY_SERVICE"), os.Getenv("NOTIFY_REGION"), os.Getenv("NOTIFY_ACCOUNTID"), os.Getenv("NOTIFY_RESOURCE"))
- queueArn := NewArn("aws", "dummy-service", "dummy-region", "dummy-accountid", "dummy-resource")
-
- topicConfig := NewNotificationConfig(topicArn)
- topicConfig.AddEvents(ObjectCreatedAll, ObjectRemovedAll)
- topicConfig.AddFilterSuffix("jpg")
-
- queueConfig := NewNotificationConfig(queueArn)
- queueConfig.AddEvents(ObjectCreatedAll)
- queueConfig.AddFilterPrefix("photos/")
-
- bNotification := BucketNotification{}
- bNotification.AddTopic(topicConfig)
-
- // Add the same topicConfig again, should have no effect
- // because it is duplicated
- bNotification.AddTopic(topicConfig)
- if len(bNotification.TopicConfigs) != 1 {
- t.Fatal("Error: duplicated entry added")
- }
-
- // Add and remove a queue config
- bNotification.AddQueue(queueConfig)
- bNotification.RemoveQueueByArn(queueArn)
-
- err = c.SetBucketNotification(bucketName, bNotification)
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- bNotification, err = c.GetBucketNotification(bucketName)
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- if len(bNotification.TopicConfigs) != 1 {
- t.Fatal("Error: Topic config is empty")
- }
-
- if bNotification.TopicConfigs[0].Filter.S3Key.FilterRules[0].Value != "jpg" {
- t.Fatal("Error: cannot get the suffix")
- }
-
- err = c.RemoveAllBucketNotification(bucketName)
- if err != nil {
- t.Fatal("Error: cannot delete bucket notification")
- }
-}
-
-// Tests comprehensive list of all methods.
-func TestFunctional(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for the short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- c, err := New(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable to debug
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Generate a random file name.
- fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- file, err := os.Create(fileName)
- if err != nil {
- t.Fatal("Error:", err)
- }
- for i := 0; i < 3; i++ {
- buf := make([]byte, rand.Intn(1<<19))
- _, err = file.Write(buf)
- if err != nil {
- t.Fatal("Error:", err)
- }
- }
- file.Close()
-
- // Verify if bucket exits and you have access.
- var exists bool
- exists, err = c.BucketExists(bucketName)
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
- if !exists {
- t.Fatal("Error: could not find ", bucketName)
- }
-
- // Asserting the default bucket policy.
- policyAccess, err := c.GetBucketPolicy(bucketName, "")
- if err != nil {
- t.Fatal("Error:", err)
- }
- if policyAccess != "none" {
- t.Fatalf("Default bucket policy incorrect")
- }
- // Set the bucket policy to 'public readonly'.
- err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadOnly)
- if err != nil {
- t.Fatal("Error:", err)
- }
- // should return policy `readonly`.
- policyAccess, err = c.GetBucketPolicy(bucketName, "")
- if err != nil {
- t.Fatal("Error:", err)
- }
- if policyAccess != "readonly" {
- t.Fatalf("Expected bucket policy to be readonly")
- }
-
- // Make the bucket 'public writeonly'.
- err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyWriteOnly)
- if err != nil {
- t.Fatal("Error:", err)
- }
- // should return policy `writeonly`.
- policyAccess, err = c.GetBucketPolicy(bucketName, "")
- if err != nil {
- t.Fatal("Error:", err)
- }
- if policyAccess != "writeonly" {
- t.Fatalf("Expected bucket policy to be writeonly")
- }
- // Make the bucket 'public read/write'.
- err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadWrite)
- if err != nil {
- t.Fatal("Error:", err)
- }
- // should return policy `readwrite`.
- policyAccess, err = c.GetBucketPolicy(bucketName, "")
- if err != nil {
- t.Fatal("Error:", err)
- }
- if policyAccess != "readwrite" {
- t.Fatalf("Expected bucket policy to be readwrite")
- }
- // List all buckets.
- buckets, err := c.ListBuckets()
- if len(buckets) == 0 {
- t.Fatal("Error: list buckets cannot be empty", buckets)
- }
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Verify if previously created bucket is listed in list buckets.
- bucketFound := false
- for _, bucket := range buckets {
- if bucket.Name == bucketName {
- bucketFound = true
- }
- }
-
- // If bucket not found error out.
- if !bucketFound {
- t.Fatal("Error: bucket ", bucketName, "not found")
- }
-
- objectName := bucketName + "unique"
-
- // Generate data
- buf := bytes.Repeat([]byte("f"), 1<<19)
-
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "")
- if err != nil {
- t.Fatal("Error: ", err)
- }
- if n != int64(len(buf)) {
- t.Fatal("Error: bad length ", n, len(buf))
- }
-
- n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), "binary/octet-stream")
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName+"-nolength")
- }
-
- if n != int64(len(buf)) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
- }
-
- // Instantiate a done channel to close all listing.
- doneCh := make(chan struct{})
- defer close(doneCh)
-
- objFound := false
- isRecursive := true // Recursive is true.
- for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) {
- if obj.Key == objectName {
- objFound = true
- break
- }
- }
- if !objFound {
- t.Fatal("Error: object " + objectName + " not found.")
- }
-
- objFound = false
- isRecursive = true // Recursive is true.
- for obj := range c.ListObjectsV2(bucketName, objectName, isRecursive, doneCh) {
- if obj.Key == objectName {
- objFound = true
- break
- }
- }
- if !objFound {
- t.Fatal("Error: object " + objectName + " not found.")
- }
-
- incompObjNotFound := true
- for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) {
- if objIncompl.Key != "" {
- incompObjNotFound = false
- break
- }
- }
- if !incompObjNotFound {
- t.Fatal("Error: unexpected dangling incomplete upload found.")
- }
-
- newReader, err := c.GetObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- newReadBytes, err := ioutil.ReadAll(newReader)
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- if !bytes.Equal(newReadBytes, buf) {
- t.Fatal("Error: bytes mismatch.")
- }
-
- err = c.FGetObject(bucketName, objectName, fileName+"-f")
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- // Generate presigned GET object url.
- presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil)
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- // Verify if presigned url works.
- resp, err := http.Get(presignedGetURL.String())
- if err != nil {
- t.Fatal("Error: ", err)
- }
- if resp.StatusCode != http.StatusOK {
- t.Fatal("Error: ", resp.Status)
- }
- newPresignedBytes, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- if !bytes.Equal(newPresignedBytes, buf) {
- t.Fatal("Error: bytes mismatch.")
- }
-
- // Set request parameters.
- reqParams := make(url.Values)
- reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
- presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- // Verify if presigned url works.
- resp, err = http.Get(presignedGetURL.String())
- if err != nil {
- t.Fatal("Error: ", err)
- }
- if resp.StatusCode != http.StatusOK {
- t.Fatal("Error: ", resp.Status)
- }
- newPresignedBytes, err = ioutil.ReadAll(resp.Body)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- if !bytes.Equal(newPresignedBytes, buf) {
- t.Fatal("Error: bytes mismatch for presigned GET URL.")
- }
- if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" {
- t.Fatalf("Error: wrong Content-Disposition received %s", resp.Header.Get("Content-Disposition"))
- }
-
- presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second)
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- buf = bytes.Repeat([]byte("g"), 1<<19)
-
- req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf))
- if err != nil {
- t.Fatal("Error: ", err)
- }
- httpClient := &http.Client{
- // Setting a sensible time out of 30secs to wait for response
- // headers. Request is pro-actively cancelled after 30secs
- // with no response.
- Timeout: 30 * time.Second,
- Transport: http.DefaultTransport,
- }
- resp, err = httpClient.Do(req)
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- newReader, err = c.GetObject(bucketName, objectName+"-presigned")
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- newReadBytes, err = ioutil.ReadAll(newReader)
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- if !bytes.Equal(newReadBytes, buf) {
- t.Fatal("Error: bytes mismatch.")
- }
-
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- err = c.RemoveObject(bucketName, objectName+"-f")
- if err != nil {
- t.Fatal("Error: ", err)
- }
- err = c.RemoveObject(bucketName, objectName+"-nolength")
- if err != nil {
- t.Fatal("Error: ", err)
- }
- err = c.RemoveObject(bucketName, objectName+"-presigned")
- if err != nil {
- t.Fatal("Error: ", err)
- }
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
- err = c.RemoveBucket(bucketName)
- if err == nil {
- t.Fatal("Error:")
- }
- if err.Error() != "The specified bucket does not exist" {
- t.Fatal("Error: ", err)
- }
- if err = os.Remove(fileName); err != nil {
- t.Fatal("Error: ", err)
- }
- if err = os.Remove(fileName + "-f"); err != nil {
- t.Fatal("Error: ", err)
- }
-}
-
-// Test for validating GetObject Reader* methods functioning when the
-// object is modified in the object store.
-func TestGetObjectObjectModified(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for the short runs")
- }
-
- // Instantiate new minio client object.
- c, err := NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Make a new bucket.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
- defer c.RemoveBucket(bucketName)
-
- // Upload an object.
- objectName := "myobject"
- content := "helloworld"
- _, err = c.PutObject(bucketName, objectName, strings.NewReader(content), "application/text")
- if err != nil {
- t.Fatalf("Failed to upload %s/%s: %v", bucketName, objectName, err)
- }
-
- defer c.RemoveObject(bucketName, objectName)
-
- reader, err := c.GetObject(bucketName, objectName)
- if err != nil {
- t.Fatalf("Failed to get object %s/%s: %v", bucketName, objectName, err)
- }
- defer reader.Close()
-
- // Read a few bytes of the object.
- b := make([]byte, 5)
- n, err := reader.ReadAt(b, 0)
- if err != nil {
- t.Fatalf("Failed to read object %s/%s at an offset: %v", bucketName, objectName, err)
- }
-
- // Upload different contents to the same object while object is being read.
- newContent := "goodbyeworld"
- _, err = c.PutObject(bucketName, objectName, strings.NewReader(newContent), "application/text")
- if err != nil {
- t.Fatalf("Failed to upload %s/%s: %v", bucketName, objectName, err)
- }
-
- // Confirm that a Stat() call in between doesn't change the Object's cached etag.
- _, err = reader.Stat()
- if err.Error() != s3ErrorResponseMap["PreconditionFailed"] {
- t.Errorf("Expected Stat to fail with error %s but received %s", s3ErrorResponseMap["PreconditionFailed"], err.Error())
- }
-
- // Read again only to find object contents have been modified since last read.
- _, err = reader.ReadAt(b, int64(n))
- if err.Error() != s3ErrorResponseMap["PreconditionFailed"] {
- t.Errorf("Expected ReadAt to fail with error %s but received %s", s3ErrorResponseMap["PreconditionFailed"], err.Error())
- }
-}
-
-// Test validates putObject to upload a file seeked at a given offset.
-func TestPutObjectUploadSeekedObject(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for the short runs")
- }
-
- // Instantiate new minio client object.
- c, err := NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Make a new bucket.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
- defer c.RemoveBucket(bucketName)
-
- tempfile, err := ioutil.TempFile("", "minio-go-upload-test-")
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- var length = 120000
- data := bytes.Repeat([]byte("1"), length)
-
- if _, err = tempfile.Write(data); err != nil {
- t.Fatal("Error:", err)
- }
-
- objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
-
- offset := length / 2
- if _, err := tempfile.Seek(int64(offset), 0); err != nil {
- t.Fatal("Error:", err)
- }
-
- n, err := c.PutObject(bucketName, objectName, tempfile, "binary/octet-stream")
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != int64(length-offset) {
- t.Fatalf("Invalid length returned, want %v, got %v", int64(length-offset), n)
- }
- tempfile.Close()
- if err = os.Remove(tempfile.Name()); err != nil {
- t.Fatal("Error:", err)
- }
-
- length = int(n)
-
- obj, err := c.GetObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- n, err = obj.Seek(int64(offset), 0)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != int64(offset) {
- t.Fatalf("Invalid offset returned, want %v, got %v", int64(offset), n)
- }
-
- n, err = c.PutObject(bucketName, objectName+"getobject", obj, "binary/octet-stream")
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != int64(length-offset) {
- t.Fatalf("Invalid length returned, want %v, got %v", int64(length-offset), n)
- }
-
- if err = c.RemoveObject(bucketName, objectName); err != nil {
- t.Fatal("Error:", err)
- }
-
- if err = c.RemoveObject(bucketName, objectName+"getobject"); err != nil {
- t.Fatal("Error:", err)
- }
-}
-
-// Test expected error cases
-func TestComposeObjectErrorCases(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for the short runs")
- }
-
- // Instantiate new minio client object
- c, err := NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- testComposeObjectErrorCases(c, t)
-}
-
-// Test concatenating 10K objects
-func TestCompose10KSources(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for the short runs")
- }
-
- // Instantiate new minio client object
- c, err := NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- testComposeMultipleSources(c, t)
-}
-
-// Test encrypted copy object
-func TestEncryptedCopyObject(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for the short runs")
- }
-
- // Instantiate new minio client object
- c, err := NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // c.TraceOn(os.Stderr)
- testEncryptedCopyObject(c, t)
-}
-
-func TestUserMetadataCopying(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for the short runs")
- }
-
- // Instantiate new minio client object
- c, err := NewV4(
- os.Getenv(serverEndpoint),
- os.Getenv(accessKey),
- os.Getenv(secretKey),
- mustParseBool(os.Getenv(enableSecurity)),
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // c.TraceOn(os.Stderr)
- testUserMetadataCopying(c, t)
-}
diff --git a/api_unit_test.go b/api_unit_test.go
index 2a9db3c..96fd8dd 100644
--- a/api_unit_test.go
+++ b/api_unit_test.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2015, 2016, 2017 Minio, Inc.
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,13 +18,8 @@
package minio
import (
- "bytes"
- "io"
- "io/ioutil"
"net/http"
"net/url"
- "os"
- "strings"
"testing"
"github.com/minio/minio-go/pkg/credentials"
@@ -41,126 +36,47 @@ func (c *customReader) Size() (n int64) {
return 10
}
-// Tests getReaderSize() for various Reader types.
-func TestGetReaderSize(t *testing.T) {
- var reader io.Reader
- size, err := getReaderSize(reader)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if size != -1 {
- t.Fatal("Reader shouldn't have any length.")
- }
-
- bytesReader := bytes.NewReader([]byte("Hello World"))
- size, err = getReaderSize(bytesReader)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if size != int64(len("Hello World")) {
- t.Fatalf("Reader length doesn't match got: %v, want: %v", size, len("Hello World"))
- }
-
- size, err = getReaderSize(new(customReader))
- if err != nil {
- t.Fatal("Error:", err)
- }
- if size != int64(10) {
- t.Fatalf("Reader length doesn't match got: %v, want: %v", size, 10)
- }
-
- stringsReader := strings.NewReader("Hello World")
- size, err = getReaderSize(stringsReader)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if size != int64(len("Hello World")) {
- t.Fatalf("Reader length doesn't match got: %v, want: %v", size, len("Hello World"))
- }
-
- // Create request channel.
- reqCh := make(chan getRequest, 1)
- // Create response channel.
- resCh := make(chan getResponse, 1)
- // Create done channel.
- doneCh := make(chan struct{})
-
- objectInfo := ObjectInfo{Size: 10}
- // Create the first request.
- firstReq := getRequest{
- isReadOp: false, // Perform only a HEAD object to get objectInfo.
- isFirstReq: true,
- }
- // Create the expected response.
- firstRes := getResponse{
- objectInfo: objectInfo,
- }
- // Send the expected response.
- resCh <- firstRes
-
- // Test setting size on the first request.
- objectReaderFirstReq := newObject(reqCh, resCh, doneCh)
- defer objectReaderFirstReq.Close()
- // Not checking the response here...just that the reader size is correct.
- _, err = objectReaderFirstReq.doGetRequest(firstReq)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Validate that the reader size is the objectInfo size.
- size, err = getReaderSize(objectReaderFirstReq)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if size != int64(10) {
- t.Fatalf("Reader length doesn't match got: %d, wanted %d", size, objectInfo.Size)
- }
-
- fileReader, err := ioutil.TempFile(os.TempDir(), "prefix")
- if err != nil {
- t.Fatal("Error:", err)
- }
- defer fileReader.Close()
- defer os.RemoveAll(fileReader.Name())
-
- size, err = getReaderSize(fileReader)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if size == -1 {
- t.Fatal("Reader length for file cannot be -1.")
+// Tests get region from host URL.
+func TestGetRegionFromURL(t *testing.T) {
+ testCases := []struct {
+ u url.URL
+ expectedRegion string
+ }{
+ {
+ u: url.URL{Host: "storage.googleapis.com"},
+ expectedRegion: "",
+ },
+ {
+ u: url.URL{Host: "s3.cn-north-1.amazonaws.com.cn"},
+ expectedRegion: "cn-north-1",
+ },
+ {
+ u: url.URL{Host: "s3-fips-us-gov-west-1.amazonaws.com"},
+ expectedRegion: "us-gov-west-1",
+ },
+ {
+ u: url.URL{Host: "s3-us-gov-west-1.amazonaws.com"},
+ expectedRegion: "us-gov-west-1",
+ },
+ {
+ u: url.URL{Host: "192.168.1.1"},
+ expectedRegion: "",
+ },
+ {
+ u: url.URL{Host: "s3-eu-west-1.amazonaws.com"},
+ expectedRegion: "eu-west-1",
+ },
+ {
+ u: url.URL{Host: "s3.amazonaws.com"},
+ expectedRegion: "",
+ },
}
- // Verify for standard input, output and error file descriptors.
- size, err = getReaderSize(os.Stdin)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if size != -1 {
- t.Fatal("Stdin should have length of -1.")
- }
- size, err = getReaderSize(os.Stdout)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if size != -1 {
- t.Fatal("Stdout should have length of -1.")
- }
- size, err = getReaderSize(os.Stderr)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if size != -1 {
- t.Fatal("Stderr should have length of -1.")
- }
- file, err := os.Open(os.TempDir())
- if err != nil {
- t.Fatal("Error:", err)
- }
- defer file.Close()
- _, err = getReaderSize(file)
- if err == nil {
- t.Fatal("Input file as directory should throw an error.")
+ for i, testCase := range testCases {
+ region := getRegionFromURL(testCase.u)
+ if testCase.expectedRegion != region {
+ t.Errorf("Test %d: Expected region %s, got %s", i+1, testCase.expectedRegion, region)
+ }
}
}
@@ -308,7 +224,7 @@ func TestMakeTargetURL(t *testing.T) {
// Test 6
{"localhost:9000", false, "mybucket", "myobject", "", nil, url.URL{Host: "localhost:9000", Scheme: "http", Path: "/mybucket/myobject"}, nil},
// Test 7, testing with query
- {"localhost:9000", false, "mybucket", "myobject", "", map[string][]string{"param": []string{"val"}}, url.URL{Host: "localhost:9000", Scheme: "http", Path: "/mybucket/myobject", RawQuery: "param=val"}, nil},
+ {"localhost:9000", false, "mybucket", "myobject", "", map[string][]string{"param": {"val"}}, url.URL{Host: "localhost:9000", Scheme: "http", Path: "/mybucket/myobject", RawQuery: "param=val"}, nil},
// Test 8, testing with port 80
{"localhost:80", false, "mybucket", "myobject", "", nil, url.URL{Host: "localhost", Scheme: "http", Path: "/mybucket/myobject"}, nil},
// Test 9, testing with port 443
diff --git a/appveyor.yml b/appveyor.yml
index 4f5c1b3..79c7a15 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -18,9 +18,10 @@ install:
- go env
- go get -u github.com/golang/lint/golint
- go get -u github.com/go-ini/ini
- - go get -u github.com/minio/go-homedir
+ - go get -u github.com/mitchellh/go-homedir
- go get -u github.com/remyoudompheng/go-misc/deadcode
- go get -u github.com/gordonklaus/ineffassign
+ - go get -u github.com/dustin/go-humanize
# to run your custom scripts instead of automatic MSBuild
build_script:
diff --git a/bucket-cache.go b/bucket-cache.go
index 6d2a40f..5d56cdf 100644
--- a/bucket-cache.go
+++ b/bucket-cache.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2015, 2016, 2017 Minio, Inc.
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,7 +18,6 @@
package minio
import (
- "encoding/hex"
"net/http"
"net/url"
"path"
@@ -91,20 +90,6 @@ func (c Client) getBucketLocation(bucketName string) (string, error) {
return c.region, nil
}
- if s3utils.IsAmazonChinaEndpoint(c.endpointURL) {
- // For china specifically we need to set everything to
- // cn-north-1 for now, there is no easier way until AWS S3
- // provides a cleaner compatible API across "us-east-1" and
- // China region.
- return "cn-north-1", nil
- } else if s3utils.IsAmazonGovCloudEndpoint(c.endpointURL) {
- // For us-gov specifically we need to set everything to
- // us-gov-west-1 for now, there is no easier way until AWS S3
- // provides a cleaner compatible API across "us-east-1" and
- // Gov cloud region.
- return "us-gov-west-1", nil
- }
-
if location, ok := c.bucketLocCache.Get(bucketName); ok {
return location, nil
}
@@ -213,20 +198,22 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro
signerType = credentials.SignatureAnonymous
}
- // Set sha256 sum for signature calculation only with signature version '4'.
- switch {
- case signerType.IsV4():
- var contentSha256 string
- if c.secure {
- contentSha256 = unsignedPayload
- } else {
- contentSha256 = hex.EncodeToString(sum256([]byte{}))
- }
- req.Header.Set("X-Amz-Content-Sha256", contentSha256)
- req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1")
- case signerType.IsV2():
+ if signerType.IsAnonymous() {
+ return req, nil
+ }
+
+ if signerType.IsV2() {
req = s3signer.SignV2(*req, accessKeyID, secretAccessKey)
+ return req, nil
+ }
+
+ // Set sha256 sum for signature calculation only with signature version '4'.
+ contentSha256 := emptySHA256Hex
+ if c.secure {
+ contentSha256 = unsignedPayload
}
+ req.Header.Set("X-Amz-Content-Sha256", contentSha256)
+ req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1")
return req, nil
}
diff --git a/bucket-cache_test.go b/bucket-cache_test.go
index 6ae4e7b..fd7e7f3 100644
--- a/bucket-cache_test.go
+++ b/bucket-cache_test.go
@@ -1,6 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2015, 2016, 2017 Minio, Inc.
+ * Copyright
+ * 2015, 2016, 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,7 +19,6 @@ package minio
import (
"bytes"
- "encoding/hex"
"encoding/xml"
"io/ioutil"
"net/http"
@@ -116,11 +115,9 @@ func TestGetBucketLocationRequest(t *testing.T) {
// with signature version '4'.
switch {
case signerType.IsV4():
- var contentSha256 string
+ contentSha256 := emptySHA256Hex
if c.secure {
contentSha256 = unsignedPayload
- } else {
- contentSha256 = hex.EncodeToString(sum256([]byte{}))
}
req.Header.Set("X-Amz-Content-Sha256", contentSha256)
req = s3signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, "us-east-1")
diff --git a/bucket-notification.go b/bucket-notification.go
index 5ac52e5..1b9d6a0 100644
--- a/bucket-notification.go
+++ b/bucket-notification.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/constants.go b/constants.go
index 9771d2f..b5945e7 100644
--- a/constants.go
+++ b/constants.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -50,7 +51,7 @@ const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5
const unsignedPayload = "UNSIGNED-PAYLOAD"
// Total number of parallel workers used for multipart operation.
-var totalWorkers = 3
+const totalWorkers = 4
// Signature related constants.
const (
diff --git a/core.go b/core.go
index 4b1054a..4245fc0 100644
--- a/core.go
+++ b/core.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,7 +18,9 @@
package minio
import (
+ "context"
"io"
+ "strings"
"github.com/minio/minio-go/pkg/policy"
)
@@ -52,14 +55,44 @@ func (c Core) ListObjectsV2(bucketName, objectPrefix, continuationToken string,
return c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, maxkeys)
}
+// CopyObject - copies an object from source object to destination object on server side.
+func (c Core) CopyObject(sourceBucket, sourceObject, destBucket, destObject string, metadata map[string]string) (ObjectInfo, error) {
+ return c.copyObjectDo(context.Background(), sourceBucket, sourceObject, destBucket, destObject, metadata)
+}
+
+// CopyObjectPart - creates a part in a multipart upload by copying (a
+// part of) an existing object.
+func (c Core) CopyObjectPart(srcBucket, srcObject, destBucket, destObject string, uploadID string,
+ partID int, startOffset, length int64, metadata map[string]string) (p CompletePart, err error) {
+
+ return c.copyObjectPartDo(context.Background(), srcBucket, srcObject, destBucket, destObject, uploadID,
+ partID, startOffset, length, metadata)
+}
+
// PutObject - Upload object. Uploads using single PUT call.
-func (c Core) PutObject(bucket, object string, size int64, data io.Reader, md5Sum, sha256Sum []byte, metadata map[string][]string) (ObjectInfo, error) {
- return c.putObjectDo(bucket, object, data, md5Sum, sha256Sum, size, metadata)
+func (c Core) PutObject(bucket, object string, data io.Reader, size int64, md5Base64, sha256Hex string, metadata map[string]string) (ObjectInfo, error) {
+ opts := PutObjectOptions{}
+ m := make(map[string]string)
+ for k, v := range metadata {
+ if strings.ToLower(k) == "content-encoding" {
+ opts.ContentEncoding = v
+ } else if strings.ToLower(k) == "content-disposition" {
+ opts.ContentDisposition = v
+ } else if strings.ToLower(k) == "content-type" {
+ opts.ContentType = v
+ } else if strings.ToLower(k) == "cache-control" {
+ opts.CacheControl = v
+ } else {
+ m[k] = metadata[k]
+ }
+ }
+ opts.UserMetadata = m
+ return c.putObjectDo(context.Background(), bucket, object, data, md5Base64, sha256Hex, size, opts)
}
-// NewMultipartUpload - Initiates new multipart upload and returns the new uploaID.
-func (c Core) NewMultipartUpload(bucket, object string, metadata map[string][]string) (uploadID string, err error) {
- result, err := c.initiateMultipartUpload(bucket, object, metadata)
+// NewMultipartUpload - Initiates new multipart upload and returns the new uploadID.
+func (c Core) NewMultipartUpload(bucket, object string, opts PutObjectOptions) (uploadID string, err error) {
+ result, err := c.initiateMultipartUpload(context.Background(), bucket, object, opts)
return result.UploadID, err
}
@@ -69,14 +102,14 @@ func (c Core) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, de
}
// PutObjectPart - Upload an object part.
-func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Sum, sha256Sum []byte) (ObjectPart, error) {
- return c.PutObjectPartWithMetadata(bucket, object, uploadID, partID, size, data, md5Sum, sha256Sum, nil)
+func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, data io.Reader, size int64, md5Base64, sha256Hex string) (ObjectPart, error) {
+ return c.PutObjectPartWithMetadata(bucket, object, uploadID, partID, data, size, md5Base64, sha256Hex, nil)
}
// PutObjectPartWithMetadata - upload an object part with additional request metadata.
-func (c Core) PutObjectPartWithMetadata(bucket, object, uploadID string, partID int,
- size int64, data io.Reader, md5Sum, sha256Sum []byte, metadata map[string][]string) (ObjectPart, error) {
- return c.uploadPart(bucket, object, uploadID, data, partID, md5Sum, sha256Sum, size, metadata)
+func (c Core) PutObjectPartWithMetadata(bucket, object, uploadID string, partID int, data io.Reader,
+ size int64, md5Base64, sha256Hex string, metadata map[string]string) (ObjectPart, error) {
+ return c.uploadPart(context.Background(), bucket, object, uploadID, data, partID, md5Base64, sha256Hex, size, metadata)
}
// ListObjectParts - List uploaded parts of an incomplete upload.x
@@ -86,7 +119,7 @@ func (c Core) ListObjectParts(bucket, object, uploadID string, partNumberMarker
// CompleteMultipartUpload - Concatenate uploaded parts and commit to an object.
func (c Core) CompleteMultipartUpload(bucket, object, uploadID string, parts []CompletePart) error {
- _, err := c.completeMultipartUpload(bucket, object, uploadID, completeMultipartUpload{
+ _, err := c.completeMultipartUpload(context.Background(), bucket, object, uploadID, completeMultipartUpload{
Parts: parts,
})
return err
@@ -94,7 +127,7 @@ func (c Core) CompleteMultipartUpload(bucket, object, uploadID string, parts []C
// AbortMultipartUpload - Abort an incomplete upload.
func (c Core) AbortMultipartUpload(bucket, object, uploadID string) error {
- return c.abortMultipartUpload(bucket, object, uploadID)
+ return c.abortMultipartUpload(context.Background(), bucket, object, uploadID)
}
// GetBucketPolicy - fetches bucket access policy for a given bucket.
@@ -110,12 +143,12 @@ func (c Core) PutBucketPolicy(bucket string, bucketPolicy policy.BucketAccessPol
// GetObject is a lower level API implemented to support reading
// partial objects and also downloading objects with special conditions
// matching etag, modtime etc.
-func (c Core) GetObject(bucketName, objectName string, reqHeaders RequestHeaders) (io.ReadCloser, ObjectInfo, error) {
- return c.getObject(bucketName, objectName, reqHeaders)
+func (c Core) GetObject(bucketName, objectName string, opts GetObjectOptions) (io.ReadCloser, ObjectInfo, error) {
+ return c.getObject(context.Background(), bucketName, objectName, opts)
}
// StatObject is a lower level API implemented to support special
// conditions matching etag, modtime on a request.
-func (c Core) StatObject(bucketName, objectName string, reqHeaders RequestHeaders) (ObjectInfo, error) {
- return c.statObject(bucketName, objectName, reqHeaders)
+func (c Core) StatObject(bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error) {
+ return c.statObject(context.Background(), bucketName, objectName, opts)
}
diff --git a/core_test.go b/core_test.go
index 81e1cd5..8cf8104 100644
--- a/core_test.go
+++ b/core_test.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,14 +19,14 @@ package minio
import (
"bytes"
- "crypto/md5"
-
"io"
- "math/rand"
+ "log"
"os"
"reflect"
"testing"
"time"
+
+ "math/rand"
)
const (
@@ -35,6 +36,33 @@ const (
enableSecurity = "ENABLE_HTTPS"
)
+// Minimum part size
+const MinPartSize = 1024 * 1024 * 64
+const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"
+const (
+ letterIdxBits = 6 // 6 bits to represent a letter index
+ letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
+ letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits
+)
+
+// randString generates random names and prepends them with a known prefix.
+func randString(n int, src rand.Source, prefix string) string {
+ b := make([]byte, n)
+ // A rand.Int63() generates 63 random bits, enough for letterIdxMax letters!
+ for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
+ if remain == 0 {
+ cache, remain = src.Int63(), letterIdxMax
+ }
+ if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
+ b[i] = letterBytes[idx]
+ i--
+ }
+ cache >>= letterIdxBits
+ remain--
+ }
+ return prefix + string(b[0:30-len(prefix)])
+}
+
// Tests for Core GetObject() function.
func TestGetObjectCore(t *testing.T) {
if testing.Short() {
@@ -75,7 +103,9 @@ func TestGetObjectCore(t *testing.T) {
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
- n, err := c.Client.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
+ n, err := c.Client.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), PutObjectOptions{
+ ContentType: "binary/octet-stream",
+ })
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
@@ -84,8 +114,6 @@ func TestGetObjectCore(t *testing.T) {
t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
}
- reqHeaders := NewGetReqHeaders()
-
offset := int64(2048)
// read directly
@@ -94,8 +122,9 @@ func TestGetObjectCore(t *testing.T) {
buf3 := make([]byte, n)
buf4 := make([]byte, 1)
- reqHeaders.SetRange(offset, offset+int64(len(buf1))-1)
- reader, objectInfo, err := c.GetObject(bucketName, objectName, reqHeaders)
+ opts := GetObjectOptions{}
+ opts.SetRange(offset, offset+int64(len(buf1))-1)
+ reader, objectInfo, err := c.GetObject(bucketName, objectName, opts)
if err != nil {
t.Fatal(err)
}
@@ -113,8 +142,8 @@ func TestGetObjectCore(t *testing.T) {
}
offset += 512
- reqHeaders.SetRange(offset, offset+int64(len(buf2))-1)
- reader, objectInfo, err = c.GetObject(bucketName, objectName, reqHeaders)
+ opts.SetRange(offset, offset+int64(len(buf2))-1)
+ reader, objectInfo, err = c.GetObject(bucketName, objectName, opts)
if err != nil {
t.Fatal(err)
}
@@ -132,8 +161,8 @@ func TestGetObjectCore(t *testing.T) {
t.Fatal("Error: Incorrect read between two GetObject from same offset.")
}
- reqHeaders.SetRange(0, int64(len(buf3)))
- reader, objectInfo, err = c.GetObject(bucketName, objectName, reqHeaders)
+ opts.SetRange(0, int64(len(buf3)))
+ reader, objectInfo, err = c.GetObject(bucketName, objectName, opts)
if err != nil {
t.Fatal(err)
}
@@ -152,9 +181,9 @@ func TestGetObjectCore(t *testing.T) {
t.Fatal("Error: Incorrect data read in GetObject, than what was previously upoaded.")
}
- reqHeaders = NewGetReqHeaders()
- reqHeaders.SetMatchETag("etag")
- _, _, err = c.GetObject(bucketName, objectName, reqHeaders)
+ opts = GetObjectOptions{}
+ opts.SetMatchETag("etag")
+ _, _, err = c.GetObject(bucketName, objectName, opts)
if err == nil {
t.Fatal("Unexpected GetObject should fail with mismatching etags")
}
@@ -162,9 +191,9 @@ func TestGetObjectCore(t *testing.T) {
t.Fatalf("Expected \"PreconditionFailed\" as code, got %s instead", errResp.Code)
}
- reqHeaders = NewGetReqHeaders()
- reqHeaders.SetMatchETagExcept("etag")
- reader, objectInfo, err = c.GetObject(bucketName, objectName, reqHeaders)
+ opts = GetObjectOptions{}
+ opts.SetMatchETagExcept("etag")
+ reader, objectInfo, err = c.GetObject(bucketName, objectName, opts)
if err != nil {
t.Fatal(err)
}
@@ -182,9 +211,9 @@ func TestGetObjectCore(t *testing.T) {
t.Fatal("Error: Incorrect data read in GetObject, than what was previously upoaded.")
}
- reqHeaders = NewGetReqHeaders()
- reqHeaders.SetRange(0, 0)
- reader, objectInfo, err = c.GetObject(bucketName, objectName, reqHeaders)
+ opts = GetObjectOptions{}
+ opts.SetRange(0, 0)
+ reader, objectInfo, err = c.GetObject(bucketName, objectName, opts)
if err != nil {
t.Fatal(err)
}
@@ -209,6 +238,75 @@ func TestGetObjectCore(t *testing.T) {
}
}
+// Tests that GetObject returns Content-Encoding properly set
+// and that it overrides any auto decoding.
+func TestGetObjectContentEncoding(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping functional tests for the short runs")
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio core client object.
+ c, err := NewCore(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ // Generate data more than 32K
+ buf := bytes.Repeat([]byte("3"), rand.Intn(1<<20)+32*1024)
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ n, err := c.Client.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), PutObjectOptions{
+ ContentEncoding: "gzip",
+ })
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+
+ if n != int64(len(buf)) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
+ }
+
+ rwc, objInfo, err := c.GetObject(bucketName, objectName, GetObjectOptions{})
+ if err != nil {
+ t.Fatalf("Error: %v", err)
+ }
+ rwc.Close()
+ if objInfo.Size <= 0 {
+ t.Fatalf("Unexpected size of the object %v, expected %v", objInfo.Size, n)
+ }
+ value, ok := objInfo.Metadata["Content-Encoding"]
+ if !ok {
+ t.Fatalf("Expected Content-Encoding metadata to be set.")
+ }
+ if value[0] != "gzip" {
+ t.Fatalf("Unexpected content-encoding found, want gzip, got %v", value)
+ }
+}
+
// Tests get bucket policy core API.
func TestGetBucketPolicy(t *testing.T) {
if testing.Short() {
@@ -272,6 +370,265 @@ func TestGetBucketPolicy(t *testing.T) {
}
}
+// Tests Core CopyObject API implementation.
+func TestCoreCopyObject(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping functional tests for short runs")
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := NewCore(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ buf := bytes.Repeat([]byte("a"), 32*1024)
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{
+ "Content-Type": "binary/octet-stream",
+ })
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+
+ if objInfo.Size != int64(len(buf)) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size)
+ }
+
+ destBucketName := bucketName
+ destObjectName := objectName + "-dest"
+
+ cobjInfo, err := c.CopyObject(bucketName, objectName, destBucketName, destObjectName, map[string]string{
+ "X-Amz-Metadata-Directive": "REPLACE",
+ "Content-Type": "application/javascript",
+ })
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName, destBucketName, destObjectName)
+ }
+ if cobjInfo.ETag != objInfo.ETag {
+ t.Fatalf("Error: expected etag to be same as source object %s, but found different etag :%s", objInfo.ETag, cobjInfo.ETag)
+ }
+
+ // Attempt to read from destBucketName and object name.
+ r, err := c.Client.GetObject(destBucketName, destObjectName, GetObjectOptions{})
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+
+ if st.Size != int64(len(buf)) {
+ t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
+ len(buf), st.Size)
+ }
+
+ if st.ContentType != "application/javascript" {
+ t.Fatalf("Error: Content types don't match, expected: application/javascript, found: %+v\n", st.ContentType)
+ }
+
+ if st.ETag != objInfo.ETag {
+ t.Fatalf("Error: expected etag to be same as source object %s, but found different etag :%s", objInfo.ETag, st.ETag)
+ }
+
+ if err := r.Close(); err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ if err := r.Close(); err == nil {
+ t.Fatal("Error: object is already closed, should return error")
+ }
+
+ err = c.RemoveObject(bucketName, objectName)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ err = c.RemoveObject(destBucketName, destObjectName)
+ if err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ err = c.RemoveBucket(bucketName)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Do not need to remove destBucketName its same as bucketName.
+}
+
+// Test Core CopyObjectPart implementation
+func TestCoreCopyObjectPart(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping functional tests for short runs")
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := NewCore(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)),
+ )
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ // Make a buffer with 5MB of data
+ buf := bytes.Repeat([]byte("abcde"), 1024*1024)
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", map[string]string{
+ "Content-Type": "binary/octet-stream",
+ })
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+
+ if objInfo.Size != int64(len(buf)) {
+ t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), objInfo.Size)
+ }
+
+ destBucketName := bucketName
+ destObjectName := objectName + "-dest"
+
+ uploadID, err := c.NewMultipartUpload(destBucketName, destObjectName, PutObjectOptions{})
+ if err != nil {
+ t.Fatal("Error:", err, bucketName, objectName)
+ }
+
+ // Content of the destination object will be two copies of
+ // the source object's data concatenated, followed by the first
+ // byte of the source object's data.
+
+ // First of three parts
+ fstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 1, 0, -1, nil)
+ if err != nil {
+ t.Fatal("Error:", err, destBucketName, destObjectName)
+ }
+
+ // Second of three parts
+ sndPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 2, 0, -1, nil)
+ if err != nil {
+ t.Fatal("Error:", err, destBucketName, destObjectName)
+ }
+
+ // Last of three parts
+ lstPart, err := c.CopyObjectPart(bucketName, objectName, destBucketName, destObjectName, uploadID, 3, 0, 1, nil)
+ if err != nil {
+ t.Fatal("Error:", err, destBucketName, destObjectName)
+ }
+
+ // Complete the multipart upload
+ err = c.CompleteMultipartUpload(destBucketName, destObjectName, uploadID, []CompletePart{fstPart, sndPart, lstPart})
+ if err != nil {
+ t.Fatal("Error:", err, destBucketName, destObjectName)
+ }
+
+ // Stat the object and check its length matches
+ objInfo, err = c.StatObject(destBucketName, destObjectName, StatObjectOptions{})
+ if err != nil {
+ t.Fatal("Error:", err, destBucketName, destObjectName)
+ }
+
+ if objInfo.Size != (5*1024*1024)*2+1 {
+ t.Fatal("Destination object has incorrect size!")
+ }
+
+ // Now we read the data back
+ getOpts := GetObjectOptions{}
+ getOpts.SetRange(0, 5*1024*1024-1)
+ r, _, err := c.GetObject(destBucketName, destObjectName, getOpts)
+ if err != nil {
+ t.Fatal("Error:", err, destBucketName, destObjectName)
+ }
+ getBuf := make([]byte, 5*1024*1024)
+ _, err = io.ReadFull(r, getBuf)
+ if err != nil {
+ t.Fatal("Error:", err, destBucketName, destObjectName)
+ }
+ if !bytes.Equal(getBuf, buf) {
+ t.Fatal("Got unexpected data in first 5MB")
+ }
+
+ getOpts.SetRange(5*1024*1024, 0)
+ r, _, err = c.GetObject(destBucketName, destObjectName, getOpts)
+ if err != nil {
+ t.Fatal("Error:", err, destBucketName, destObjectName)
+ }
+ getBuf = make([]byte, 5*1024*1024+1)
+ _, err = io.ReadFull(r, getBuf)
+ if err != nil {
+ t.Fatal("Error:", err, destBucketName, destObjectName)
+ }
+ if !bytes.Equal(getBuf[:5*1024*1024], buf) {
+ t.Fatal("Got unexpected data in second 5MB")
+ }
+ if getBuf[5*1024*1024] != buf[0] {
+ t.Fatal("Got unexpected data in last byte of copied object!")
+ }
+
+ if err := c.RemoveObject(destBucketName, destObjectName); err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ if err := c.RemoveObject(bucketName, objectName); err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ if err := c.RemoveBucket(bucketName); err != nil {
+ t.Fatal("Error: ", err)
+ }
+
+ // Do not need to remove destBucketName its same as bucketName.
+}
+
// Test Core PutObject.
func TestCorePutObject(t *testing.T) {
if testing.Short() {
@@ -307,26 +664,21 @@ func TestCorePutObject(t *testing.T) {
t.Fatal("Error:", err, bucketName)
}
- buf := bytes.Repeat([]byte("a"), minPartSize)
+ buf := bytes.Repeat([]byte("a"), 32*1024)
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
// Object content type
objectContentType := "binary/octet-stream"
- metadata := make(map[string][]string)
- metadata["Content-Type"] = []string{objectContentType}
+ metadata := make(map[string]string)
+ metadata["Content-Type"] = objectContentType
- objInfo, err := c.PutObject(bucketName, objectName, int64(len(buf)), bytes.NewReader(buf), md5.New().Sum(nil), nil, metadata)
+ objInfo, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "1B2M2Y8AsgTpgAmY7PhCfg==", "", metadata)
if err == nil {
- t.Fatal("Error expected: nil, got: ", err)
+ t.Fatal("Error expected: error, got: nil(success)")
}
- objInfo, err = c.PutObject(bucketName, objectName, int64(len(buf)), bytes.NewReader(buf), nil, sum256(nil), metadata)
- if err == nil {
- t.Fatal("Error expected: nil, got: ", err)
- }
-
- objInfo, err = c.PutObject(bucketName, objectName, int64(len(buf)), bytes.NewReader(buf), nil, nil, metadata)
+ objInfo, err = c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), "", "", metadata)
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
@@ -336,7 +688,7 @@ func TestCorePutObject(t *testing.T) {
}
// Read the data back
- r, err := c.Client.GetObject(bucketName, objectName)
+ r, err := c.Client.GetObject(bucketName, objectName, GetObjectOptions{})
if err != nil {
t.Fatal("Error:", err, bucketName, objectName)
}
@@ -373,3 +725,47 @@ func TestCorePutObject(t *testing.T) {
t.Fatal("Error:", err)
}
}
+
+func TestCoreGetObjectMetadata(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping functional tests for the short runs")
+ }
+
+ core, err := NewCore(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableSecurity)))
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test")
+
+ // Make a new bucket.
+ err = core.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ t.Fatal("Error:", err, bucketName)
+ }
+
+ metadata := map[string]string{
+ "X-Amz-Meta-Key-1": "Val-1",
+ }
+
+ _, err = core.PutObject(bucketName, "my-objectname",
+ bytes.NewReader([]byte("hello")), 5, "", "", metadata)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ reader, objInfo, err := core.GetObject(bucketName, "my-objectname", GetObjectOptions{})
+ if err != nil {
+ log.Fatalln(err)
+ }
+ defer reader.Close()
+
+ if objInfo.Metadata.Get("X-Amz-Meta-Key-1") != "Val-1" {
+ log.Fatalln("Expected metadata to be available but wasn't")
+ }
+}
diff --git a/docs/API.md b/docs/API.md
index e0d0a11..d1026ee 100644
--- a/docs/API.md
+++ b/docs/API.md
@@ -55,17 +55,19 @@ func main() {
| [`MakeBucket`](#MakeBucket) | [`GetObject`](#GetObject) | [`NewSymmetricKey`](#NewSymmetricKey) | [`PresignedGetObject`](#PresignedGetObject) | [`SetBucketPolicy`](#SetBucketPolicy) | [`SetAppInfo`](#SetAppInfo) |
| [`ListBuckets`](#ListBuckets) | [`PutObject`](#PutObject) | [`NewAsymmetricKey`](#NewAsymmetricKey) | [`PresignedPutObject`](#PresignedPutObject) | [`GetBucketPolicy`](#GetBucketPolicy) | [`SetCustomTransport`](#SetCustomTransport) |
| [`BucketExists`](#BucketExists) | [`CopyObject`](#CopyObject) | [`GetEncryptedObject`](#GetEncryptedObject) | [`PresignedPostPolicy`](#PresignedPostPolicy) | [`ListBucketPolicies`](#ListBucketPolicies) | [`TraceOn`](#TraceOn) |
-| [`RemoveBucket`](#RemoveBucket) | [`StatObject`](#StatObject) | [`PutObjectStreaming`](#PutObjectStreaming) | | [`SetBucketNotification`](#SetBucketNotification) | [`TraceOff`](#TraceOff) |
-| [`ListObjects`](#ListObjects) | [`RemoveObject`](#RemoveObject) | [`PutEncryptedObject`](#PutEncryptedObject) | | [`GetBucketNotification`](#GetBucketNotification) | [`SetS3TransferAccelerate`](#SetS3TransferAccelerate) |
-| [`ListObjectsV2`](#ListObjectsV2) | [`RemoveObjects`](#RemoveObjects) | [`NewSSEInfo`](#NewSSEInfo) | | [`RemoveAllBucketNotification`](#RemoveAllBucketNotification) | |
+| [`RemoveBucket`](#RemoveBucket) | [`StatObject`](#StatObject) | [`PutEncryptedObject`](#PutEncryptedObject) | | [`SetBucketNotification`](#SetBucketNotification) | [`TraceOff`](#TraceOff) |
+| [`ListObjects`](#ListObjects) | [`RemoveObject`](#RemoveObject) | [`NewSSEInfo`](#NewSSEInfo) | | [`GetBucketNotification`](#GetBucketNotification) | [`SetS3TransferAccelerate`](#SetS3TransferAccelerate) |
+| [`ListObjectsV2`](#ListObjectsV2) | [`RemoveObjects`](#RemoveObjects) | [`FPutEncryptedObject`](#FPutEncryptedObject) | | [`RemoveAllBucketNotification`](#RemoveAllBucketNotification) | |
| [`ListIncompleteUploads`](#ListIncompleteUploads) | [`RemoveIncompleteUpload`](#RemoveIncompleteUpload) | | | [`ListenBucketNotification`](#ListenBucketNotification) | |
| | [`FPutObject`](#FPutObject) | | | | |
| | [`FGetObject`](#FGetObject) | | | | |
| | [`ComposeObject`](#ComposeObject) | | | | |
| | [`NewSourceInfo`](#NewSourceInfo) | | | | |
| | [`NewDestinationInfo`](#NewDestinationInfo) | | | | |
-
-
+| | [`PutObjectWithContext`](#PutObjectWithContext) | | | | |
+| | [`GetObjectWithContext`](#GetObjectWithContext) | | | | |
+| | [`FPutObjectWithContext`](#FPutObjectWithContext) | | | | |
+| | [`FGetObjectWithContext`](#FGetObjectWithContext) | | | | |
## 1. Constructor
<a name="Minio"></a>
@@ -82,7 +84,7 @@ __Parameters__
|`ssl` | _bool_ | If 'true' API requests will be secure (HTTPS), and insecure (HTTP) otherwise |
### NewWithRegion(endpoint, accessKeyID, secretAccessKey string, ssl bool, region string) (*Client, error)
-Initializes minio client, with region configured. Unlike New(), NewWithRegion avoids bucket-location lookup operations and it is slightly faster. Use this function when if your application deals with single region.
+Initializes minio client, with region configured. Unlike New(), NewWithRegion avoids bucket-location lookup operations and it is slightly faster. Use this function when your application deals with a single region.
__Parameters__
@@ -121,7 +123,7 @@ __Example__
```go
-err := minioClient.MakeBucket("mybucket", "us-east-1")
+err = minioClient.MakeBucket("mybucket", "us-east-1")
if err != nil {
fmt.Println(err)
return
@@ -131,15 +133,16 @@ fmt.Println("Successfully created mybucket.")
<a name="ListBuckets"></a>
### ListBuckets() ([]BucketInfo, error)
-
Lists all buckets.
| Param | Type | Description |
|---|---|---|
-|`bucketList` | _[]BucketInfo_ | Lists of all buckets |
+|`bucketList` | _[]minio.BucketInfo_ | Lists of all buckets |
-| Param | Type | Description |
+__minio.BucketInfo__
+
+| Field | Type | Description |
|---|---|---|
|`bucket.Name` | _string_ | Name of the bucket |
|`bucket.CreationDate` | _time.Time_ | Date of bucket creation |
@@ -150,7 +153,7 @@ __Example__
```go
buckets, err := minioClient.ListBuckets()
- if err != nil {
+if err != nil {
fmt.Println(err)
return
}
@@ -161,7 +164,6 @@ for _, bucket := range buckets {
<a name="BucketExists"></a>
### BucketExists(bucketName string) (found bool, err error)
-
Checks if a bucket exists.
__Parameters__
@@ -196,8 +198,7 @@ if found {
<a name="RemoveBucket"></a>
### RemoveBucket(bucketName string) error
-
-Removes a bucket.
+Removes a bucket, bucket should be empty to be successfully removed.
__Parameters__
@@ -210,7 +211,7 @@ __Example__
```go
-err := minioClient.RemoveBucket("mybucket")
+err = minioClient.RemoveBucket("mybucket")
if err != nil {
fmt.Println(err)
return
@@ -219,7 +220,6 @@ if err != nil {
<a name="ListObjects"></a>
### ListObjects(bucketName, prefix string, recursive bool, doneCh chan struct{}) <-chan ObjectInfo
-
Lists objects in a bucket.
__Parameters__
@@ -237,9 +237,11 @@ __Return Value__
|Param |Type |Description |
|:---|:---| :---|
-|`chan ObjectInfo` | _chan ObjectInfo_ |Read channel for all objects in the bucket, the object is of the format listed below: |
+|`objectInfo` | _chan minio.ObjectInfo_ |Read channel for all objects in the bucket, the object is of the format listed below: |
-|Param |Type |Description |
+__minio.ObjectInfo__
+
+|Field |Type |Description |
|:---|:---| :---|
|`objectInfo.Key` | _string_ |Name of the object |
|`objectInfo.Size` | _int64_ |Size of the object |
@@ -268,7 +270,6 @@ for object := range objectCh {
<a name="ListObjectsV2"></a>
### ListObjectsV2(bucketName, prefix string, recursive bool, doneCh chan struct{}) <-chan ObjectInfo
-
Lists objects in a bucket using the recommended listing API v2
__Parameters__
@@ -286,14 +287,7 @@ __Return Value__
|Param |Type |Description |
|:---|:---| :---|
-|`chan ObjectInfo` | _chan ObjectInfo_ |Read channel for all the objects in the bucket, the object is of the format listed below: |
-
-|Param |Type |Description |
-|:---|:---| :---|
-|`objectInfo.Key` | _string_ |Name of the object |
-|`objectInfo.Size` | _int64_ |Size of the object |
-|`objectInfo.ETag` | _string_ |MD5 checksum of the object |
-|`objectInfo.LastModified` | _time.Time_ |Time when object was last modified |
+|`objectInfo` | _chan minio.ObjectInfo_ |Read channel for all the objects in the bucket, the object is of the format listed below: |
```go
@@ -316,7 +310,6 @@ for object := range objectCh {
<a name="ListIncompleteUploads"></a>
### ListIncompleteUploads(bucketName, prefix string, recursive bool, doneCh chan struct{}) <- chan ObjectMultipartInfo
-
Lists partially uploaded objects in a bucket.
@@ -335,11 +328,11 @@ __Return Value__
|Param |Type |Description |
|:---|:---| :---|
-|`chan ObjectMultipartInfo` | _chan ObjectMultipartInfo_ |Emits multipart objects of the format listed below: |
+|`multiPartInfo` | _chan minio.ObjectMultipartInfo_ |Emits multipart objects of the format listed below: |
-__Return Value__
+__minio.ObjectMultipartInfo__
-|Param |Type |Description |
+|Field |Type |Description |
|:---|:---| :---|
|`multiPartObjInfo.Key` | _string_ |Name of incompletely uploaded object |
|`multiPartObjInfo.UploadID` | _string_ |Upload ID of incompletely uploaded object |
@@ -369,8 +362,7 @@ for multiPartObject := range multiPartObjectCh {
## 3. Object operations
<a name="GetObject"></a>
-### GetObject(bucketName, objectName string) (*Object, error)
-
+### GetObject(bucketName, objectName string, opts GetObjectOptions) (*Object, error)
Returns a stream of the object data. Most of the common errors occur when reading the stream.
@@ -381,7 +373,14 @@ __Parameters__
|:---|:---| :---|
|`bucketName` | _string_ |Name of the bucket |
|`objectName` | _string_ |Name of the object |
+|`opts` | _minio.GetObjectOptions_ | Options for GET requests specifying additional options like encryption, If-Match |
+
+__minio.GetObjectOptions__
+
+|Field | Type | Description |
+|:---|:---|:---|
+| `opts.Materials` | _encrypt.Materials_ | Interface provided by `encrypt` package to encrypt a stream of data (For more information see https://godoc.org/github.com/minio/minio-go) |
__Return Value__
@@ -395,7 +394,7 @@ __Example__
```go
-object, err := minioClient.GetObject("mybucket", "photo.jpg")
+object, err := minioClient.GetObject("mybucket", "myobject", minio.GetObjectOptions{})
if err != nil {
fmt.Println(err)
return
@@ -412,34 +411,146 @@ if _, err = io.Copy(localFile, object); err != nil {
```
<a name="FGetObject"></a>
-### FGetObject(bucketName, objectName, filePath string) error
- Downloads and saves the object as a file in the local filesystem.
+### FGetObject(bucketName, objectName, filePath string, opts GetObjectOptions) error
+Downloads and saves the object as a file in the local filesystem.
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |Name of the bucket |
+|`objectName` | _string_ |Name of the object |
+|`filePath` | _string_ |Path to download object to |
+|`opts` | _minio.GetObjectOptions_ | Options for GET requests specifying additional options like encryption, If-Match |
+
+
+__Example__
+
+
+```go
+err = minioClient.FGetObject("mybucket", "myobject", "/tmp/myobject", minio.GetObjectOptions{})
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+<a name="GetObjectWithContext"></a>
+### GetObjectWithContext(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error)
+Identical to GetObject operation, but accepts a context for request cancellation.
+
+__Parameters__
+|Param |Type |Description |
+|:---|:---| :---|
+|`ctx` | _context.Context_ |Request context |
+|`bucketName` | _string_ |Name of the bucket |
+|`objectName` | _string_ |Name of the object |
+|`opts` | _minio.GetObjectOptions_ | Options for GET requests specifying additional options like encryption, If-Match |
+
+
+__Return Value__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`object` | _*minio.Object_ |_minio.Object_ represents object reader. It implements io.Reader, io.Seeker, io.ReaderAt and io.Closer interfaces. |
+
+
+__Example__
+
+
+```go
+ctx, cancel := context.WithTimeout(context.Background(), 100 * time.Second)
+defer cancel()
+
+object, err := minioClient.GetObjectWithContext(ctx, "mybucket", "myobject", minio.GetObjectOptions{})
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+localFile, err := os.Create("/tmp/local-file.jpg")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+if _, err = io.Copy(localFile, object); err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+<a name="FGetObjectWithContext"></a>
+### FGetObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error
+Identical to FGetObject operation, but allows request cancellation.
+
__Parameters__
|Param |Type |Description |
|:---|:---| :---|
+|`ctx` | _context.Context_ |Request context |
|`bucketName` | _string_ |Name of the bucket |
|`objectName` | _string_ |Name of the object |
|`filePath` | _string_ |Path to download object to |
+|`opts` | _minio.GetObjectOptions_ | Options for GET requests specifying additional options like encryption, If-Match |
__Example__
```go
-err := minioClient.FGetObject("mybucket", "photo.jpg", "/tmp/photo.jpg")
+ctx, cancel := context.WithTimeout(context.Background(), 100 * time.Second)
+defer cancel()
+
+err = minioClient.FGetObjectWithContext(ctx, "mybucket", "myobject", "/tmp/myobject", minio.GetObjectOptions{})
if err != nil {
fmt.Println(err)
return
}
```
-<a name="PutObject"></a>
-### PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int, err error)
+<a name="FGetEncryptedObject"></a>
+### FGetEncryptedObject(bucketName, objectName, filePath string, materials encrypt.Materials) error
+Identical to FGetObject operation, but decrypts an encrypted request
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |Name of the bucket |
+|`objectName` | _string_ |Name of the object |
+|`filePath` | _string_ |Path to download object to |
+|`materials` | _encrypt.Materials_ | Interface provided by `encrypt` package to encrypt a stream of data (For more information see https://godoc.org/github.com/minio/minio-go) |
+
+__Example__
+
+
+```go
+// Generate a master symmetric key
+key := encrypt.NewSymmetricKey([]byte("my-secret-key-00"))
+
+// Build the CBC encryption material
+cbcMaterials, err := encrypt.NewCBCSecureMaterials(key)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+err = minioClient.FGetEncryptedObject("mybucket", "myobject", "/tmp/myobject", cbcMaterials)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+<a name="PutObject"></a>
+### PutObject(bucketName, objectName string, reader io.Reader, objectSize int64, opts PutObjectOptions) (n int, err error)
Uploads objects that are less than 64MiB in a single PUT operation. For objects that are greater than 64MiB in size, PutObject seamlessly uploads the object as parts of 64MiB or more depending on the actual file size. The max upload size for an object is 5TB.
__Parameters__
@@ -450,7 +561,20 @@ __Parameters__
|`bucketName` | _string_ |Name of the bucket |
|`objectName` | _string_ |Name of the object |
|`reader` | _io.Reader_ |Any Go type that implements io.Reader |
-|`contentType` | _string_ |Content type of the object |
+|`objectSize`| _int64_ |Size of the object being uploaded. Pass -1 if stream size is unknown |
+|`opts` | _minio.PutObjectOptions_ | Allows user to set optional custom metadata, content headers, encryption keys and number of threads for multipart upload operation. |
+
+__minio.PutObjectOptions__
+
+|Field | Type | Description |
+|:--- |:--- | :--- |
+| `opts.UserMetadata` | _map[string]string_ | Map of user metadata|
+| `opts.Progress` | _io.Reader_ | Reader to fetch progress of an upload |
+| `opts.ContentType` | _string_ | Content type of object, e.g "application/text" |
+| `opts.ContentEncoding` | _string_ | Content encoding of object, e.g "gzip" |
+| `opts.ContentDisposition` | _string_ | Content disposition of object, "inline" |
+| `opts.CacheControl` | _string_ | Used to specify directives for caching mechanisms in both requests and responses e.g "max-age=600"|
+| `opts.EncryptMaterials` | _encrypt.Materials_ | Interface provided by `encrypt` package to encrypt a stream of data (For more information see https://godoc.org/github.com/minio/minio-go) |
__Example__
@@ -464,32 +588,46 @@ if err != nil {
}
defer file.Close()
-n, err := minioClient.PutObject("mybucket", "myobject", file, "application/octet-stream")
+fileStat, err := file.Stat()
if err != nil {
fmt.Println(err)
return
}
-```
-<a name="PutObjectStreaming"></a>
-### PutObjectStreaming(bucketName, objectName string, reader io.Reader) (n int, err error)
+n, err := minioClient.PutObject("mybucket", "myobject", file, fileStat.Size(), minio.PutObjectOptions{ContentType:"application/octet-stream"})
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully uploaded bytes: ", n)
+```
-Uploads an object as multiple chunks keeping memory consumption constant. It is similar to PutObject in how objects are broken into multiple parts. Each part in turn is transferred as multiple chunks with constant memory usage. However resuming previously failed uploads from where it was left is not supported.
+API methods PutObjectWithSize, PutObjectWithMetadata, PutObjectStreaming, and PutObjectWithProgress available in minio-go SDK release v3.0.3 are replaced by the new PutObject call variant that accepts a PutObjectOptions struct.
+<a name="PutObjectWithContext"></a>
+### PutObjectWithContext(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64, opts PutObjectOptions) (n int, err error)
+Identical to PutObject operation, but allows request cancellation.
__Parameters__
|Param |Type |Description |
-|:---|:---|:---|
+|:---|:---| :---|
+|`ctx` | _context.Context_ |Request context |
|`bucketName` | _string_ |Name of the bucket |
|`objectName` | _string_ |Name of the object |
|`reader` | _io.Reader_ |Any Go type that implements io.Reader |
+|`objectSize`| _int64_ | size of the object being uploaded. Pass -1 if stream size is unknown |
+|`opts` | _minio.PutObjectOptions_ |Struct that allows user to set optional custom metadata, content-type, content-encoding, content-disposition and cache-control headers, pass encryption module for encrypting objects, and optionally configure number of threads for multipart put operation. |
+
__Example__
```go
+ctx, cancel := context.WithTimeout(context.Background(), 10 * time.Second)
+defer cancel()
+
file, err := os.Open("my-testfile")
if err != nil {
fmt.Println(err)
@@ -497,50 +635,63 @@ if err != nil {
}
defer file.Close()
-n, err := minioClient.PutObjectStreaming("mybucket", "myobject", file)
+fileStat, err := file.Stat()
if err != nil {
fmt.Println(err)
return
}
-```
+n, err := minioClient.PutObjectWithContext(ctx, "my-bucketname", "my-objectname", file, fileStat.Size(), minio.PutObjectOptions{
+ ContentType: "application/octet-stream",
+})
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully uploaded bytes: ", n)
+```
<a name="CopyObject"></a>
### CopyObject(dst DestinationInfo, src SourceInfo) error
-
Create or replace an object through server-side copying of an existing object. It supports conditional copying, copying a part of an object and server-side encryption of destination and decryption of source. See the `SourceInfo` and `DestinationInfo` types for further details.
To copy multiple source objects into a single destination object see the `ComposeObject` API.
-
__Parameters__
|Param |Type |Description |
|:---|:---| :---|
-|`dst` | _DestinationInfo_ |Argument describing the destination object |
-|`src` | _SourceInfo_ |Argument describing the source object |
+|`dst` | _minio.DestinationInfo_ |Argument describing the destination object |
+|`src` | _minio.SourceInfo_ |Argument describing the source object |
__Example__
```go
-// Use-case 1: Simple copy object with no conditions, etc
+// Use-case 1: Simple copy object with no conditions.
// Source object
src := minio.NewSourceInfo("my-sourcebucketname", "my-sourceobjectname", nil)
// Destination object
-dst := minio.NewDestinationInfo("my-bucketname", "my-objectname", nil, nil)
+dst, err := minio.NewDestinationInfo("my-bucketname", "my-objectname", nil, nil)
+if err != nil {
+ fmt.Println(err)
+ return
+}
-/ Copy object call
-err = s3Client.CopyObject(dst, src)
+// Copy object call
+err = minioClient.CopyObject(dst, src)
if err != nil {
fmt.Println(err)
return
}
+```
-// Use-case 2: Copy object with copy-conditions, and copying only part of the source object.
+```go
+// Use-case 2:
+// Copy object with copy-conditions, and copying only part of the source object.
// 1. that matches a given ETag
// 2. and modified after 1st April 2014
// 3. but unmodified since 23rd April 2014
@@ -562,10 +713,14 @@ src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 23, 0, 0, 0, 0, time.UTC)
src.SetRange(0, 1024*1024-1)
// Destination object
-dst := minio.NewDestinationInfo("my-bucketname", "my-objectname", nil, nil)
+dst, err := minio.NewDestinationInfo("my-bucketname", "my-objectname", nil, nil)
+if err != nil {
+ fmt.Println(err)
+ return
+}
-/ Copy object call
-err = s3Client.CopyObject(dst, src)
+// Copy object call
+err = minioClient.CopyObject(dst, src)
if err != nil {
fmt.Println(err)
return
@@ -573,10 +728,8 @@ if err != nil {
```
<a name="ComposeObject"></a>
-### ComposeObject(dst DestinationInfo, srcs []SourceInfo) error
-
-Create an object by concatenating a list of source objects using
-server-side copying.
+### ComposeObject(dst minio.DestinationInfo, srcs []minio.SourceInfo) error
+Create an object by concatenating a list of source objects using server-side copying.
__Parameters__
@@ -597,14 +750,14 @@ decKey := minio.NewSSEInfo([]byte{1, 2, 3}, "")
// Source objects to concatenate. We also specify decryption
// key for each
-src1 := minio.NewSourceInfo("bucket1", "object1", decKey)
-src1.SetMatchETag("31624deb84149d2f8ef9c385918b653a")
+src1 := minio.NewSourceInfo("bucket1", "object1", &decKey)
+src1.SetMatchETagCond("31624deb84149d2f8ef9c385918b653a")
-src2 := minio.NewSourceInfo("bucket2", "object2", decKey)
-src2.SetMatchETag("f8ef9c385918b653a31624deb84149d2")
+src2 := minio.NewSourceInfo("bucket2", "object2", &decKey)
+src2.SetMatchETagCond("f8ef9c385918b653a31624deb84149d2")
-src3 := minio.NewSourceInfo("bucket3", "object3", decKey)
-src3.SetMatchETag("5918b653a31624deb84149d2f8ef9c38")
+src3 := minio.NewSourceInfo("bucket3", "object3", &decKey)
+src3.SetMatchETagCond("5918b653a31624deb84149d2f8ef9c38")
// Create slice of sources.
srcs := []minio.SourceInfo{src1, src2, src3}
@@ -613,19 +766,24 @@ srcs := []minio.SourceInfo{src1, src2, src3}
encKey := minio.NewSSEInfo([]byte{8, 9, 0}, "")
// Create destination info
-dst := minio.NewDestinationInfo("bucket", "object", encKey, nil)
-err = s3Client.ComposeObject(dst, srcs)
+dst, err := minio.NewDestinationInfo("bucket", "object", &encKey, nil)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Compose object call by concatenating multiple source files.
+err = minioClient.ComposeObject(dst, srcs)
if err != nil {
- log.Println(err)
- return
+ fmt.Println(err)
+ return
}
-log.Println("Composed object successfully.")
+fmt.Println("Composed object successfully.")
```
<a name="NewSourceInfo"></a>
### NewSourceInfo(bucket, object string, decryptSSEC *SSEInfo) SourceInfo
-
Construct a `SourceInfo` object that can be used as the source for server-side copying operations like `CopyObject` and `ComposeObject`. This object can be used to set copy-conditions on the source.
__Parameters__
@@ -638,18 +796,47 @@ __Parameters__
__Example__
-``` go
+```go
// No decryption parameter.
-src := NewSourceInfo("bucket", "object", nil)
+src := minio.NewSourceInfo("bucket", "object", nil)
+
+// Destination object
+dst, err := minio.NewDestinationInfo("my-bucketname", "my-objectname", nil, nil)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Copy object call
+err = minioClient.CopyObject(dst, src)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+```go
// With decryption parameter.
-decKey := NewSSEKey([]byte{1,2,3}, "")
-src := NewSourceInfo("bucket", "object", decKey)
+decKey := minio.NewSSEInfo([]byte{1,2,3}, "")
+src := minio.NewSourceInfo("bucket", "object", &decKey)
+
+// Destination object
+dst, err := minio.NewDestinationInfo("my-bucketname", "my-objectname", nil, nil)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Copy object call
+err = minioClient.CopyObject(dst, src)
+if err != nil {
+ fmt.Println(err)
+ return
+}
```
<a name="NewDestinationInfo"></a>
-### NewDestinationInfo(bucket, object string, encryptSSEC *SSEInfo, userMeta map[string]string) DestinationInfo
-
+### NewDestinationInfo(bucket, object string, encryptSSEC *SSEInfo, userMeta map[string]string) (DestinationInfo, error)
Construct a `DestinationInfo` object that can be used as the destination object for server-side copying operations like `CopyObject` and `ComposeObject`.
__Parameters__
@@ -663,24 +850,48 @@ __Parameters__
__Example__
-``` go
+```go
// No encryption parameter.
-src := NewDestinationInfo("bucket", "object", nil, nil)
+src := minio.NewSourceInfo("bucket", "object", nil)
+dst, err := minio.NewDestinationInfo("bucket", "object", nil, nil)
+if err != nil {
+ fmt.Println(err)
+ return
+}
-// With encryption parameter.
-encKey := NewSSEKey([]byte{1,2,3}, "")
-src := NewDecryptionInfo("bucket", "object", encKey, nil)
+// Copy object call
+err = minioClient.CopyObject(dst, src)
+if err != nil {
+ fmt.Println(err)
+ return
+}
```
+```go
+src := minio.NewSourceInfo("bucket", "object", nil)
+
+// With encryption parameter.
+encKey := minio.NewSSEInfo([]byte{1,2,3}, "")
+dst, err := minio.NewDestinationInfo("bucket", "object", &encKey, nil)
+if err != nil {
+ fmt.Println(err)
+ return
+}
-<a name="FPutObject"></a>
-### FPutObject(bucketName, objectName, filePath, contentType string) (length int64, err error)
+// Copy object call
+err = minioClient.CopyObject(dst, src)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+<a name="FPutObject"></a>
+### FPutObject(bucketName, objectName, filePath string, opts PutObjectOptions) (length int64, err error)
Uploads contents from a file to objectName.
FPutObject uploads objects that are less than 64MiB in a single PUT operation. For objects that are greater than the 64MiB in size, FPutObject seamlessly uploads the object in chunks of 64MiB or more depending on the actual file size. The max upload size for an object is 5TB.
-
__Parameters__
@@ -689,25 +900,56 @@ __Parameters__
|`bucketName` | _string_ |Name of the bucket |
|`objectName` | _string_ |Name of the object |
|`filePath` | _string_ |Path to file to be uploaded |
-|`contentType` | _string_ |Content type of the object |
+|`opts` | _minio.PutObjectOptions_ |Struct that allows user to set optional custom metadata, content-type, content-encoding, content-disposition and cache-control headers, pass encryption module for encrypting objects, and optionally configure number of threads for multipart put operation. |
__Example__
```go
-n, err := minioClient.FPutObject("mybucket", "myobject.csv", "/tmp/otherobject.csv", "application/csv")
+n, err := minioClient.FPutObject("my-bucketname", "my-objectname", "my-filename.csv", minio.PutObjectOptions{
+ ContentType: "application/csv",
+})
if err != nil {
fmt.Println(err)
return
}
+fmt.Println("Successfully uploaded bytes: ", n)
```
-<a name="StatObject"></a>
-### StatObject(bucketName, objectName string) (ObjectInfo, error)
+<a name="FPutObjectWithContext"></a>
+### FPutObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts PutObjectOptions) (length int64, err error)
+Identical to FPutObject operation, but allows request cancellation.
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`ctx` | _context.Context_ |Request context |
+|`bucketName` | _string_ |Name of the bucket |
+|`objectName` | _string_ |Name of the object |
+|`filePath` | _string_ |Path to file to be uploaded |
+|`opts` | _minio.PutObjectOptions_ |Struct that allows user to set optional custom metadata, content-type, content-encoding, content-disposition and cache-control headers, pass encryption module for encrypting objects, and optionally configure number of threads for multipart put operation. |
-Gets metadata of an object.
+__Example__
+
+
+```go
+ctx, cancel := context.WithTimeout(context.Background(), 100 * time.Second)
+defer cancel()
+
+n, err := minioClient.FPutObjectWithContext(ctx, "mybucket", "myobject.csv", "/tmp/otherobject.csv", minio.PutObjectOptions{ContentType:"application/csv"})
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully uploaded bytes: ", n)
+```
+<a name="StatObject"></a>
+### StatObject(bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error)
+Fetch metadata of an object.
__Parameters__
@@ -716,16 +958,19 @@ __Parameters__
|:---|:---| :---|
|`bucketName` | _string_ |Name of the bucket |
|`objectName` | _string_ |Name of the object |
+|`opts` | _minio.StatObjectOptions_ | Options for GET info/stat requests specifying additional options like encryption, If-Match |
__Return Value__
|Param |Type |Description |
|:---|:---| :---|
-|`objInfo` | _ObjectInfo_ |Object stat information |
+|`objInfo` | _minio.ObjectInfo_ |Object stat information |
-|Param |Type |Description |
+__minio.ObjectInfo__
+
+|Field |Type |Description |
|:---|:---| :---|
|`objInfo.LastModified` | _time.Time_ |Time when object was last modified |
|`objInfo.ETag` | _string_ |MD5 checksum of the object|
@@ -733,11 +978,11 @@ __Return Value__
|`objInfo.Size` | _int64_ |Size of the object|
- __Example__
+__Example__
```go
-objInfo, err := minioClient.StatObject("mybucket", "photo.jpg")
+objInfo, err := minioClient.StatObject("mybucket", "myobject", minio.StatObjectOptions{})
if err != nil {
fmt.Println(err)
return
@@ -747,10 +992,8 @@ fmt.Println(objInfo)
<a name="RemoveObject"></a>
### RemoveObject(bucketName, objectName string) error
-
Removes an object.
-
__Parameters__
@@ -761,46 +1004,54 @@ __Parameters__
```go
-err := minioClient.RemoveObject("mybucket", "photo.jpg")
+err = minioClient.RemoveObject("mybucket", "myobject")
if err != nil {
fmt.Println(err)
return
}
```
-<a name="RemoveObjects"></a>
-### RemoveObjects(bucketName string, objectsCh chan string) errorCh chan minio.RemoveObjectError
-Removes a list of objects obtained from an input channel. The call sends a delete request to the server up to 1000 objects at a time.
-The errors observed are sent over the error channel.
+<a name="RemoveObjects"></a>
+### RemoveObjects(bucketName string, objectsCh chan string) (errorCh <-chan RemoveObjectError)
+Removes a list of objects obtained from an input channel. The call sends a delete request to the server up to 1000 objects at a time. The errors observed are sent over the error channel.
__Parameters__
|Param |Type |Description |
|:---|:---| :---|
|`bucketName` | _string_ |Name of the bucket |
-|`objectsCh` | _chan string_ | Prefix of objects to be removed |
+|`objectsCh` | _chan string_ | Channel of objects to be removed |
__Return Values__
|Param |Type |Description |
|:---|:---| :---|
-|`errorCh` | _chan minio.RemoveObjectError | Channel of errors observed during deletion. |
-
+|`errorCh` | _<-chan minio.RemoveObjectError_ | Receive-only channel of errors observed during deletion. |
```go
-errorCh := minioClient.RemoveObjects("mybucket", objectsCh)
-for e := range errorCh {
- fmt.Println("Error detected during deletion: " + e.Err.Error())
+objectsCh := make(chan string)
+
+// Send object names that are needed to be removed to objectsCh
+go func() {
+ defer close(objectsCh)
+ // List all objects from a bucket-name with a matching prefix.
+ for object := range minioClient.ListObjects("my-bucketname", "my-prefixname", true, nil) {
+ if object.Err != nil {
+ log.Fatalln(object.Err)
+ }
+ objectsCh <- object.Key
+ }
+}()
+
+for rErr := range minioClient.RemoveObjects("mybucket", objectsCh) {
+ fmt.Println("Error detected during deletion: ", rErr)
}
```
-
-
<a name="RemoveIncompleteUpload"></a>
### RemoveIncompleteUpload(bucketName, objectName string) error
-
Removes a partially uploaded object.
__Parameters__
@@ -815,7 +1066,7 @@ __Example__
```go
-err := minioClient.RemoveIncompleteUpload("mybucket", "photo.jpg")
+err = minioClient.RemoveIncompleteUpload("mybucket", "myobject")
if err != nil {
fmt.Println(err)
return
@@ -825,7 +1076,7 @@ if err != nil {
## 4. Encrypted object operations
<a name="NewSymmetricKey"></a>
-### NewSymmetricKey(key []byte) *minio.SymmetricKey
+### NewSymmetricKey(key []byte) *encrypt.SymmetricKey
__Parameters__
@@ -838,15 +1089,29 @@ __Return Value__
|Param |Type |Description |
|:---|:---| :---|
-|`symmetricKey` | _*minio.SymmetricKey_ |_minio.SymmetricKey_ represents a symmetric key structure which can be used to encrypt and decrypt data. |
+|`symmetricKey` | _*encrypt.SymmetricKey_ | represents a symmetric key structure which can be used to encrypt and decrypt data |
```go
-symKey := minio.NewSymmetricKey([]byte("my-secret-key-00"))
-```
+symKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00"))
+
+// Build the CBC encryption material with symmetric key.
+cbcMaterials, err := encrypt.NewCBCSecureMaterials(symKey)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully initialized Symmetric key CBC materials", cbcMaterials)
+object, err := minioClient.GetEncryptedObject("mybucket", "myobject", cbcMaterials)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+defer object.Close()
+```
<a name="NewAsymmetricKey"></a>
-### NewAsymmetricKey(privateKey []byte, publicKey[]byte) (*minio.AsymmetricKey, error)
+### NewAsymmetricKey(privateKey []byte, publicKey[]byte) (*encrypt.AsymmetricKey, error)
__Parameters__
@@ -860,32 +1125,50 @@ __Return Value__
|Param |Type |Description |
|:---|:---| :---|
-|`asymmetricKey` | _*minio.AsymmetricKey_ | represents an asymmetric key structure which can be used to encrypt and decrypt data. |
-|`err` | _error_ | encountered errors. |
+|`asymmetricKey` | _*encrypt.AsymmetricKey_ | represents an asymmetric key structure which can be used to encrypt and decrypt data |
+|`err` | _error_ | Standard Error |
```go
privateKey, err := ioutil.ReadFile("private.key")
if err != nil {
- log.Fatal(err)
+ fmt.Println(err)
+ return
}
publicKey, err := ioutil.ReadFile("public.key")
if err != nil {
- log.Fatal(err)
+ fmt.Println(err)
+ return
}
// Initialize the asymmetric key
-asymmetricKey, err := minio.NewAsymmetricKey(privateKey, publicKey)
+asymmetricKey, err := encrypt.NewAsymmetricKey(privateKey, publicKey)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Build the CBC encryption material for asymmetric key.
+cbcMaterials, err := encrypt.NewCBCSecureMaterials(asymmetricKey)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully initialized Asymmetric key CBC materials", cbcMaterials)
+
+object, err := minioClient.GetEncryptedObject("mybucket", "myobject", cbcMaterials)
if err != nil {
- log.Fatal(err)
+ fmt.Println(err)
+ return
}
+defer object.Close()
```
<a name="GetEncryptedObject"></a>
-### GetEncryptedObject(bucketName, objectName string, encryptMaterials minio.EncryptionMaterials) (io.ReadCloser, error)
+### GetEncryptedObject(bucketName, objectName string, encryptMaterials encrypt.Materials) (io.ReadCloser, error)
-Returns the decrypted stream of the object data based of the given encryption materiels. Most of the common errors occur when reading the stream.
+Returns the decrypted stream of the object data based on the given encryption materials. Most of the common errors occur when reading the stream.
__Parameters__
@@ -893,7 +1176,7 @@ __Parameters__
|:---|:---| :---|
|`bucketName` | _string_ | Name of the bucket |
|`objectName` | _string_ | Name of the object |
-|`encryptMaterials` | _minio.EncryptionMaterials_ | The module to decrypt the object data |
+|`encryptMaterials` | _encrypt.Materials_ | Interface provided by `encrypt` package to encrypt a stream of data (For more information see https://godoc.org/github.com/minio/minio-go) |
__Return Value__
@@ -909,15 +1192,16 @@ __Example__
```go
// Generate a master symmetric key
-key := minio.NewSymmetricKey("my-secret-key-00")
+key := encrypt.NewSymmetricKey([]byte("my-secret-key-00"))
// Build the CBC encryption material
-cbcMaterials, err := NewCBCSecureMaterials(key)
+cbcMaterials, err := encrypt.NewCBCSecureMaterials(key)
if err != nil {
- t.Fatal(err)
+ fmt.Println(err)
+ return
}
-object, err := minioClient.GetEncryptedObject("mybucket", "photo.jpg", cbcMaterials)
+object, err := minioClient.GetEncryptedObject("mybucket", "myobject", cbcMaterials)
if err != nil {
fmt.Println(err)
return
@@ -929,6 +1213,7 @@ if err != nil {
fmt.Println(err)
return
}
+defer localFile.Close()
if _, err = io.Copy(localFile, object); err != nil {
fmt.Println(err)
@@ -938,11 +1223,9 @@ if _, err = io.Copy(localFile, object); err != nil {
<a name="PutEncryptedObject"></a>
-### PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials minio.EncryptionMaterials, metadata map[string][]string, progress io.Reader) (n int, err error)
-
+### PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials encrypt.Materials) (n int, err error)
Encrypt and upload an object.
-
__Parameters__
|Param |Type |Description |
@@ -950,10 +1233,7 @@ __Parameters__
|`bucketName` | _string_ |Name of the bucket |
|`objectName` | _string_ |Name of the object |
|`reader` | _io.Reader_ |Any Go type that implements io.Reader |
-|`encryptMaterials` | _minio.EncryptionMaterials_ | The module that encrypts data |
-|`metadata` | _map[string][]string_ | Object metadata to be stored |
-|`progress` | io.Reader | A reader to update the upload progress |
-
+|`encryptMaterials` | _encrypt.Materials_ | Interface provided by `encrypt` package to encrypt a stream of data (For more information see https://godoc.org/github.com/minio/minio-go) |
__Example__
@@ -961,25 +1241,29 @@ __Example__
// Load a private key
privateKey, err := ioutil.ReadFile("private.key")
if err != nil {
- log.Fatal(err)
+ fmt.Println(err)
+ return
}
// Load a public key
publicKey, err := ioutil.ReadFile("public.key")
if err != nil {
- log.Fatal(err)
+ fmt.Println(err)
+ return
}
// Build an asymmetric key
-key, err := NewAssymetricKey(privateKey, publicKey)
+key, err := encrypt.NewAsymmetricKey(privateKey, publicKey)
if err != nil {
- log.Fatal(err)
+ fmt.Println(err)
+ return
}
// Build the CBC encryption module
-cbcMaterials, err := NewCBCSecureMaterials(key)
+cbcMaterials, err := encrypt.NewCBCSecureMaterials(key)
if err != nil {
- t.Fatal(err)
+ fmt.Println(err)
+ return
}
// Open a file to upload
@@ -991,17 +1275,71 @@ if err != nil {
defer file.Close()
// Upload the encrypted form of the file
-n, err := minioClient.PutEncryptedObject("mybucket", "myobject", file, encryptMaterials, nil, nil)
+n, err := minioClient.PutEncryptedObject("mybucket", "myobject", file, cbcMaterials)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully uploaded encrypted bytes: ", n)
+```
+
+<a name="FPutEncryptedObject"></a>
+### FPutEncryptedObject(bucketName, objectName, filePath string, encryptMaterials encrypt.Materials) (n int, err error)
+Encrypt and upload an object from a file.
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |Name of the bucket |
+|`objectName` | _string_ |Name of the object |
+|`filePath` | _string_ |Path to file to be uploaded |
+|`encryptMaterials` | _encrypt.Materials_ | Interface provided by `encrypt` package to encrypt a stream of data (For more information see https://godoc.org/github.com/minio/minio-go) |
+
+__Example__
+
+
+```go
+// Load a private key
+privateKey, err := ioutil.ReadFile("private.key")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Load a public key
+publicKey, err := ioutil.ReadFile("public.key")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Build an asymmetric key
+key, err := encrypt.NewAsymmetricKey(privateKey, publicKey)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Build the CBC encryption module
+cbcMaterials, err := encrypt.NewCBCSecureMaterials(key)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+n, err := minioClient.FPutEncryptedObject("mybucket", "myobject.csv", "/tmp/otherobject.csv", cbcMaterials)
if err != nil {
fmt.Println(err)
return
}
+fmt.Println("Successfully uploaded encrypted bytes: ", n)
```
<a name="NewSSEInfo"></a>
### NewSSEInfo(key []byte, algo string) SSEInfo
-
Create a key object for use as encryption or decryption parameter in operations involving server-side-encryption with customer provided key (SSE-C).
__Parameters__
@@ -1011,18 +1349,11 @@ __Parameters__
| `key` | _[]byte_ | Byte-slice of the raw, un-encoded binary key |
| `algo` | _string_ | Algorithm to use in encryption or decryption with the given key. Can be empty (defaults to `AES256`) |
-__Example__
-
-``` go
-// Key for use in encryption/decryption
-keyInfo := NewSSEInfo([]byte{1,2,3}, "")
-```
## 5. Presigned operations
<a name="PresignedGetObject"></a>
### PresignedGetObject(bucketName, objectName string, expiry time.Duration, reqParams url.Values) (*url.URL, error)
-
Generates a presigned URL for HTTP GET operations. Browsers/Mobile clients may point to this URL to directly download objects even if the bucket is private. This presigned URL can have an associated expiration time in seconds after which it is no longer operational. The default expiry is set to 7 days.
__Parameters__
@@ -1050,17 +1381,15 @@ if err != nil {
fmt.Println(err)
return
}
+fmt.Println("Successfully generated presigned URL", presignedURL)
```
<a name="PresignedPutObject"></a>
### PresignedPutObject(bucketName, objectName string, expiry time.Duration) (*url.URL, error)
-
Generates a presigned URL for HTTP PUT operations. Browsers/Mobile clients may point to this URL to upload objects directly to a bucket even if it is private. This presigned URL can have an associated expiration time in seconds after which it is no longer operational. The default expiry is set to 7 days.
NOTE: you can upload to S3 only with specified object name.
-
-
__Parameters__
@@ -1082,25 +1411,49 @@ if err != nil {
fmt.Println(err)
return
}
-fmt.Println(presignedURL)
+fmt.Println("Successfully generated presigned URL", presignedURL)
```
-<a name="PresignedPostPolicy"></a>
-### PresignedPostPolicy(PostPolicy) (*url.URL, map[string]string, error)
+<a name="PresignedHeadObject"></a>
+### PresignedHeadObject(bucketName, objectName string, expiry time.Duration, reqParams url.Values) (*url.URL, error)
+Generates a presigned URL for HTTP HEAD operations. Browsers/Mobile clients may point to this URL to directly get metadata from objects even if the bucket is private. This presigned URL can have an associated expiration time in seconds after which it is no longer operational. The default expiry is set to 7 days.
-Allows setting policy conditions to a presigned URL for POST operations. Policies such as bucket name to receive object uploads, key name prefixes, expiry policy may be set.
+__Parameters__
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |Name of the bucket |
+|`objectName` | _string_ |Name of the object |
+|`expiry` | _time.Duration_ |Expiry of presigned URL in seconds |
+|`reqParams` | _url.Values_ |Additional response header overrides supports _response-expires_, _response-content-type_, _response-cache-control_, _response-content-disposition_. |
-Create policy :
+
+__Example__
```go
-policy := minio.NewPostPolicy()
-```
+// Set request parameters for content-disposition.
+reqParams := make(url.Values)
+reqParams.Set("response-content-disposition", "attachment; filename=\"your-filename.txt\"")
-Apply upload policy restrictions:
+// Generates a presigned url which expires in a day.
+presignedURL, err := minioClient.PresignedHeadObject("mybucket", "myobject", time.Second * 24 * 60 * 60, reqParams)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully generated presigned URL", presignedURL)
+```
+<a name="PresignedPostPolicy"></a>
+### PresignedPostPolicy(PostPolicy) (*url.URL, map[string]string, error)
+Allows setting policy conditions to a presigned URL for POST operations. Policies such as bucket name to receive object uploads, key name prefixes, expiry policy may be set.
```go
+// Initialize policy condition config.
+policy := minio.NewPostPolicy()
+
+// Apply upload policy restrictions:
policy.SetBucket("mybucket")
policy.SetKey("myobject")
policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
@@ -1111,20 +1464,17 @@ policy.SetContentType("image/png")
// Only allow content size in range 1KB to 1MB.
policy.SetContentLengthRange(1024, 1024*1024)
-// Get the POST form key/value object:
+// Add user metadata using the key "custom" and value "user"
+policy.SetUserMetadata("custom", "user")
+// Get the POST form key/value object:
url, formData, err := minioClient.PresignedPostPolicy(policy)
if err != nil {
fmt.Println(err)
return
}
-```
-
-POST your content from the command line using `curl`:
-
-
-```go
+// POST your content from the command line using `curl`
fmt.Printf("curl ")
for k, v := range formData {
fmt.Printf("-F %s=%s ", k, v)
@@ -1137,7 +1487,6 @@ fmt.Printf("%s\n", url)
<a name="SetBucketPolicy"></a>
### SetBucketPolicy(bucketname, objectPrefix string, policy policy.BucketPolicy) error
-
Set access permissions on bucket or an object prefix.
Importing `github.com/minio/minio-go/pkg/policy` package is needed.
@@ -1168,7 +1517,9 @@ __Example__
```go
-err := minioClient.SetBucketPolicy("mybucket", "myprefix", policy.BucketPolicyReadWrite)
+// Sets 'mybucket' with a sub-directory 'myprefix' to be anonymously accessible for
+// both read and write operations.
+err = minioClient.SetBucketPolicy("mybucket", "myprefix", policy.BucketPolicyReadWrite)
if err != nil {
fmt.Println(err)
return
@@ -1177,7 +1528,6 @@ if err != nil {
<a name="GetBucketPolicy"></a>
### GetBucketPolicy(bucketName, objectPrefix string) (policy.BucketPolicy, error)
-
Get access permissions on a bucket or a prefix.
Importing `github.com/minio/minio-go/pkg/policy` package is needed.
@@ -1212,7 +1562,6 @@ fmt.Println("Access permissions for mybucket is", bucketPolicy)
<a name="ListBucketPolicies"></a>
### ListBucketPolicies(bucketName, objectPrefix string) (map[string]BucketPolicy, error)
-
Get access permissions rules associated to the specified bucket and prefix.
__Parameters__
@@ -1228,7 +1577,7 @@ __Return Values__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketPolicies` | _map[string]BucketPolicy_ |Map of object resource paths and their permissions |
+|`bucketPolicies` | _map[string]minio.BucketPolicy_ |Map of object resource paths and their permissions |
|`err` | _error_ |Standard Error |
__Example__
@@ -1247,8 +1596,7 @@ for resource, permission := range bucketPolicies {
<a name="GetBucketNotification"></a>
### GetBucketNotification(bucketName string) (BucketNotification, error)
-
-Get all notification configurations related to the specified bucket.
+Get notification configuration on a bucket.
__Parameters__
@@ -1262,7 +1610,7 @@ __Return Values__
|Param |Type |Description |
|:---|:---| :---|
-|`bucketNotification` | _BucketNotification_ |structure which holds all notification configurations|
+|`bucketNotification` | _minio.BucketNotification_ |structure which holds all notification configurations|
|`err` | _error_ |Standard Error |
__Example__
@@ -1271,10 +1619,12 @@ __Example__
```go
bucketNotification, err := minioClient.GetBucketNotification("mybucket")
if err != nil {
- log.Fatalf("Failed to get bucket notification configurations for mybucket - %v", err)
+ fmt.Println("Failed to get bucket notification configurations for mybucket", err)
+ return
}
-for _, topicConfig := range bucketNotification.TopicConfigs {
- for _, e := range topicConfig.Events {
+
+for _, queueConfig := range bucketNotification.QueueConfigs {
+ for _, e := range queueConfig.Events {
fmt.Println(e + " event is enabled")
}
}
@@ -1282,7 +1632,6 @@ for _, topicConfig := range bucketNotification.TopicConfigs {
<a name="SetBucketNotification"></a>
### SetBucketNotification(bucketName string, bucketNotification BucketNotification) error
-
Set a new bucket notification on a bucket.
__Parameters__
@@ -1291,7 +1640,7 @@ __Parameters__
|Param |Type |Description |
|:---|:---| :---|
|`bucketName` | _string_ |Name of the bucket |
-|`bucketNotification` | _BucketNotification_ |Represents the XML to be sent to the configured web service |
+|`bucketNotification` | _minio.BucketNotification_ |Represents the XML to be sent to the configured web service |
__Return Values__
@@ -1304,24 +1653,25 @@ __Example__
```go
-topicArn := NewArn("aws", "sns", "us-east-1", "804605494417", "PhotoUpdate")
+queueArn := minio.NewArn("aws", "sqs", "us-east-1", "804605494417", "PhotoUpdate")
+
+queueConfig := minio.NewNotificationConfig(queueArn)
+queueConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll)
+queueConfig.AddFilterPrefix("photos/")
+queueConfig.AddFilterSuffix(".jpg")
-topicConfig := NewNotificationConfig(topicArn)
-topicConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll)
-lambdaConfig.AddFilterPrefix("photos/")
-lambdaConfig.AddFilterSuffix(".jpg")
+bucketNotification := minio.BucketNotification{}
+bucketNotification.AddQueue(queueConfig)
-bucketNotification := BucketNotification{}
-bucketNotification.AddTopic(topicConfig)
-err := c.SetBucketNotification(bucketName, bucketNotification)
+err = minioClient.SetBucketNotification("mybucket", bucketNotification)
if err != nil {
- fmt.Println("Unable to set the bucket notification: " + err)
+ fmt.Println("Unable to set the bucket notification: ", err)
+ return
}
```
<a name="RemoveAllBucketNotification"></a>
### RemoveAllBucketNotification(bucketName string) error
-
Remove all configured bucket notifications on a bucket.
__Parameters__
@@ -1342,18 +1692,16 @@ __Example__
```go
-err := c.RemoveAllBucketNotification(bucketName)
+err = minioClient.RemoveAllBucketNotification("mybucket")
if err != nil {
fmt.Println("Unable to remove bucket notifications.", err)
+ return
}
```
<a name="ListenBucketNotification"></a>
### ListenBucketNotification(bucketName, prefix, suffix string, events []string, doneCh <-chan struct{}) <-chan NotificationInfo
-
-ListenBucketNotification API receives bucket notification events through the
-notification channel. The returned notification channel has two fields
-'Records' and 'Err'.
+ListenBucketNotification API receives bucket notification events through the notification channel. The returned notification channel has two fields 'Records' and 'Err'.
- 'Records' holds the notifications received from the server.
- 'Err' indicates any error while processing the received notifications.
@@ -1368,17 +1716,20 @@ __Parameters__
|`bucketName` | _string_ | Bucket to listen notifications on |
|`prefix` | _string_ | Object key prefix to filter notifications for |
|`suffix` | _string_ | Object key suffix to filter notifications for |
-|`events` | _[]string_| Enables notifications for specific event types |
+|`events` | _[]string_ | Enables notifications for specific event types |
|`doneCh` | _chan struct{}_ | A message on this channel ends the ListenBucketNotification iterator |
__Return Values__
|Param |Type |Description |
|:---|:---| :---|
-|`chan NotificationInfo` | _chan_ | Read channel for all notifications on bucket |
-|`NotificationInfo` | _object_ | Notification object represents events info |
-|`notificationInfo.Records` | _[]NotificationEvent_ | Collection of notification events |
-|`notificationInfo.Err` | _error_ | Carries any error occurred during the operation |
+|`notificationInfo` | _chan minio.NotificationInfo_ | Channel of bucket notifications |
+
+__minio.NotificationInfo__
+
+|Field |Type |Description |
+|`notificationInfo.Records` | _[]minio.NotificationEvent_ | Collection of notification events |
+|`notificationInfo.Err` | _error_ | Carries any error occurred during the operation (Standard Error) |
__Example__
@@ -1392,15 +1743,15 @@ doneCh := make(chan struct{})
defer close(doneCh)
// Listen for bucket notifications on "mybucket" filtered by prefix, suffix and events.
-for notificationInfo := range minioClient.ListenBucketNotification("YOUR-BUCKET", "PREFIX", "SUFFIX", []string{
+for notificationInfo := range minioClient.ListenBucketNotification("mybucket", "myprefix/", ".mysuffix", []string{
"s3:ObjectCreated:*",
"s3:ObjectAccessed:*",
"s3:ObjectRemoved:*",
}, doneCh) {
if notificationInfo.Err != nil {
- log.Fatalln(notificationInfo.Err)
+ fmt.Println(notificationInfo.Err)
}
- log.Println(notificationInfo)
+ fmt.Println(notificationInfo)
}
```
@@ -1408,7 +1759,7 @@ for notificationInfo := range minioClient.ListenBucketNotification("YOUR-BUCKET"
<a name="SetAppInfo"></a>
### SetAppInfo(appName, appVersion string)
-Adds application details to User-Agent.
+Add custom application details to User-Agent.
__Parameters__
@@ -1428,8 +1779,7 @@ minioClient.SetAppInfo("myCloudApp", "1.0.0")
<a name="SetCustomTransport"></a>
### SetCustomTransport(customHTTPTransport http.RoundTripper)
-Overrides default HTTP transport. This is usually needed for debugging
-or for adding custom TLS certificates.
+Overrides default HTTP transport. This is usually needed for debugging or for adding custom TLS certificates.
__Parameters__
@@ -1440,8 +1790,7 @@ __Parameters__
<a name="TraceOn"></a>
### TraceOn(outputStream io.Writer)
-Enables HTTP tracing. The trace is written to the io.Writer
-provided. If outputStream is nil, trace is written to os.Stdout.
+Enables HTTP tracing. The trace is written to the io.Writer provided. If outputStream is nil, trace is written to os.Stdout.
__Parameters__
@@ -1457,7 +1806,7 @@ Disables HTTP tracing.
<a name="SetS3TransferAccelerate"></a>
### SetS3TransferAccelerate(acceleratedEndpoint string)
Set AWS S3 transfer acceleration endpoint for all API requests hereafter.
-NOTE: This API applies only to AWS S3 and ignored with other S3 compatible object storage services.
+NOTE: This API applies only to AWS S3 and is a no operation for S3 compatible object storage services.
__Parameters__
diff --git a/docs/checker.go.template b/docs/checker.go.template
new file mode 100644
index 0000000..2e0f13a
--- /dev/null
+++ b/docs/checker.go.template
@@ -0,0 +1,21 @@
+package main
+
+import (
+ "fmt"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Use a secure connection.
+ ssl := true
+
+ // Initialize minio client object.
+ minioClient, err := minio.New("play.minio.io:9000", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ssl)
+ if err != nil {
+ fmt.Println(err)
+ return
+ }
+
+ {{.Text}}
+}
diff --git a/docs/validator.go b/docs/validator.go
new file mode 100644
index 0000000..7d5cbaa
--- /dev/null
+++ b/docs/validator.go
@@ -0,0 +1,227 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "text/template"
+
+ "github.com/a8m/mark"
+ "github.com/gernest/wow"
+ "github.com/gernest/wow/spin"
+ "github.com/minio/cli"
+)
+
// init aborts the program immediately when the Go toolchain is not
// installed: every validation step below shells out to the "go" binary.
func init() {
	_, err := exec.LookPath("go")
	if err != nil {
		panic(err)
	}
}
+
// globalFlags declares the command-line options accepted by the validator:
//   -m     path to the markdown API documentation to scan
//   -t     template each extracted code block is spliced into
//   -skip  number of leading code blocks to ignore before validating
var globalFlags = []cli.Flag{
	cli.StringFlag{
		Name:  "m",
		Value: "API.md",
		Usage: "Path to markdown api documentation.",
	},
	cli.StringFlag{
		Name:  "t",
		Value: "checker.go.template",
		Usage: "Template used for generating the programs.",
	},
	cli.IntFlag{
		Name:  "skip",
		Value: 2,
		Usage: "Skip entries before validating the code.",
	},
}
+
// runGofmt runs "gofmt -s -w -l" on path, simplifying and rewriting the
// file in place. It returns the command's combined stdout/stderr so the
// caller can surface gofmt's diagnostics.
func runGofmt(path string) (msg string, err error) {
	cmdArgs := []string{"-s", "-w", "-l", path}
	cmd := exec.Command("gofmt", cmdArgs...)
	stdoutStderr, err := cmd.CombinedOutput()
	// Return the captured output even on failure (previously "" was
	// returned, so the caller's error message printed an empty msg).
	// This matches the behavior of runGoImports and runGoBuild.
	return string(stdoutStderr), err
}
+
// runGoImports rewrites path in place with "goimports -w", fixing up its
// import block. The combined stdout/stderr of the command is returned in
// all cases so callers can report diagnostics.
func runGoImports(path string) (msg string, err error) {
	out, err := exec.Command("goimports", "-w", path).CombinedOutput()
	return string(out), err
}
+
// runGoBuild compiles the file at path, discarding the resulting binary.
// It returns the command's combined stdout/stderr so compile errors can
// be shown to the user.
func runGoBuild(path string) (msg string, err error) {
	// os.DevNull instead of a hard-coded "/dev/null": the null device is
	// "NUL" on Windows, so this keeps the check portable.
	cmdArgs := []string{"build", "-o", os.DevNull, path}
	cmd := exec.Command("go", cmdArgs...)
	stdoutStderr, err := cmd.CombinedOutput()
	if err != nil {
		return string(stdoutStderr), err
	}
	return string(stdoutStderr), nil
}
+
+func validatorAction(ctx *cli.Context) error {
+ if !ctx.IsSet("m") || !ctx.IsSet("t") {
+ return nil
+ }
+ docPath := ctx.String("m")
+ var err error
+ docPath, err = filepath.Abs(docPath)
+ if err != nil {
+ return err
+ }
+ data, err := ioutil.ReadFile(docPath)
+ if err != nil {
+ return err
+ }
+
+ templatePath := ctx.String("t")
+ templatePath, err = filepath.Abs(templatePath)
+ if err != nil {
+ return err
+ }
+
+ skipEntries := ctx.Int("skip")
+ m := mark.New(string(data), &mark.Options{
+ Gfm: true, // Github markdown support is enabled by default.
+ })
+
+ t, err := template.ParseFiles(templatePath)
+ if err != nil {
+ return err
+ }
+
+ tmpDir, err := ioutil.TempDir("", "md-verifier")
+ if err != nil {
+ return err
+ }
+ defer os.RemoveAll(tmpDir)
+
+ entryN := 1
+ for i := mark.NodeText; i < mark.NodeCheckbox; i++ {
+ if mark.NodeCode != mark.NodeType(i) {
+ m.AddRenderFn(mark.NodeType(i), func(node mark.Node) (s string) {
+ return ""
+ })
+ continue
+ }
+ m.AddRenderFn(mark.NodeCode, func(node mark.Node) (s string) {
+ p, ok := node.(*mark.CodeNode)
+ if !ok {
+ return
+ }
+ p.Text = strings.NewReplacer("&lt;", "<", "&gt;", ">", "&quot;", `"`, "&amp;", "&").Replace(p.Text)
+ if skipEntries > 0 {
+ skipEntries--
+ return
+ }
+
+ testFilePath := filepath.Join(tmpDir, "example.go")
+ w, werr := os.Create(testFilePath)
+ if werr != nil {
+ panic(werr)
+ }
+ t.Execute(w, p)
+ w.Sync()
+ w.Close()
+ entryN++
+
+ msg, err := runGofmt(testFilePath)
+ if err != nil {
+ fmt.Printf("Failed running gofmt on %s, with (%s):(%s)\n", testFilePath, msg, err)
+ os.Exit(-1)
+ }
+
+ msg, err = runGoImports(testFilePath)
+ if err != nil {
+ fmt.Printf("Failed running gofmt on %s, with (%s):(%s)\n", testFilePath, msg, err)
+ os.Exit(-1)
+ }
+
+ msg, err = runGoBuild(testFilePath)
+ if err != nil {
+ fmt.Printf("Failed running gobuild on %s, with (%s):(%s)\n", testFilePath, msg, err)
+ fmt.Printf("Code with possible issue in %s:\n%s", docPath, p.Text)
+ fmt.Printf("To test `go build %s`\n", testFilePath)
+ os.Exit(-1)
+ }
+
+ // Once successfully built remove the test file
+ os.Remove(testFilePath)
+ return
+ })
+ }
+
+ w := wow.New(os.Stdout, spin.Get(spin.Moon), fmt.Sprintf(" Running validation tests in %s", tmpDir))
+
+ w.Start()
+ // Render markdown executes our checker on each code blocks.
+ _ = m.Render()
+ w.PersistWith(spin.Get(spin.Runner), " Successfully finished tests")
+ w.Stop()
+
+ return nil
+}
+
+func main() {
+ app := cli.NewApp()
+ app.Action = validatorAction
+ app.HideVersion = true
+ app.HideHelpCommand = true
+ app.Usage = "Validates code block sections inside API.md"
+ app.Author = "Minio.io"
+ app.Flags = globalFlags
+ // Help template for validator
+ app.CustomAppHelpTemplate = `NAME:
+ {{.Name}} - {{.Usage}}
+
+USAGE:
+ {{.Name}} {{if .VisibleFlags}}[FLAGS] {{end}}COMMAND{{if .VisibleFlags}} [COMMAND FLAGS | -h]{{end}} [ARGUMENTS...]
+
+COMMANDS:
+ {{range .VisibleCommands}}{{join .Names ", "}}{{ "\t" }}{{.Usage}}
+ {{end}}{{if .VisibleFlags}}
+FLAGS:
+ {{range .VisibleFlags}}{{.}}
+ {{end}}{{end}}
+TEMPLATE:
+ Validator uses Go's 'text/template' formatting so you need to ensure
+ your template is formatted correctly, check 'docs/checker.go.template'
+
+USAGE:
+ go run docs/validator.go -m docs/API.md -t /tmp/mycode.go.template
+
+`
+ app.Run(os.Args)
+
+}
diff --git a/docs/zh_CN/API.md b/docs/zh_CN/API.md
new file mode 100644
index 0000000..d20ca10
--- /dev/null
+++ b/docs/zh_CN/API.md
@@ -0,0 +1,1820 @@
+# Minio Go Client API文档 [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io)
+
+## 初使化Minio Client对象。
+
+## Minio
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // 使用ssl
+ ssl := true
+
+ // 初使化minio client对象。
+ minioClient, err := minio.New("play.minio.io:9000", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ssl)
+ if err != nil {
+ fmt.Println(err)
+ return
+ }
+}
+```
+
+## AWS S3
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // 使用ssl
+ ssl := true
+
+ // 初使化minio client对象。
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", ssl)
+ if err != nil {
+ fmt.Println(err)
+ return
+ }
+}
+```
+
+| 操作存储桶 | 操作对象 | 操作加密对象 | Presigned操作 | 存储桶策略/通知 | 客户端自定义设置 |
+| :--- | :--- | :--- | :--- | :--- | :--- |
+| [`MakeBucket`](#MakeBucket) | [`GetObject`](#GetObject) | [`NewSymmetricKey`](#NewSymmetricKey) | [`PresignedGetObject`](#PresignedGetObject) | [`SetBucketPolicy`](#SetBucketPolicy) | [`SetAppInfo`](#SetAppInfo) |
+| [`ListBuckets`](#ListBuckets) | [`PutObject`](#PutObject) | [`NewAsymmetricKey`](#NewAsymmetricKey) | [`PresignedPutObject`](#PresignedPutObject) | [`GetBucketPolicy`](#GetBucketPolicy) | [`SetCustomTransport`](#SetCustomTransport) |
+| [`BucketExists`](#BucketExists) | [`CopyObject`](#CopyObject) | [`GetEncryptedObject`](#GetEncryptedObject) | [`PresignedPostPolicy`](#PresignedPostPolicy) | [`ListBucketPolicies`](#ListBucketPolicies) | [`TraceOn`](#TraceOn) |
+| [`RemoveBucket`](#RemoveBucket) | [`StatObject`](#StatObject) | [`PutEncryptedObject`](#PutEncryptedObject) | | [`SetBucketNotification`](#SetBucketNotification) | [`TraceOff`](#TraceOff) |
+| [`ListObjects`](#ListObjects) | [`RemoveObject`](#RemoveObject) | [`NewSSEInfo`](#NewSSEInfo) | | [`GetBucketNotification`](#GetBucketNotification) | [`SetS3TransferAccelerate`](#SetS3TransferAccelerate) |
+| [`ListObjectsV2`](#ListObjectsV2) | [`RemoveObjects`](#RemoveObjects) | [`FPutEncryptedObject`](#FPutEncryptedObject) | | [`RemoveAllBucketNotification`](#RemoveAllBucketNotification) | |
+| [`ListIncompleteUploads`](#ListIncompleteUploads) | [`RemoveIncompleteUpload`](#RemoveIncompleteUpload) | | | [`ListenBucketNotification`](#ListenBucketNotification) | |
+| | [`FPutObject`](#FPutObject) | | | | |
+| | [`FGetObject`](#FGetObject) | | | | |
+| | [`ComposeObject`](#ComposeObject) | | | | |
+| | [`NewSourceInfo`](#NewSourceInfo) | | | | |
+| | [`NewDestinationInfo`](#NewDestinationInfo) | | | | |
+| | [`PutObjectWithContext`](#PutObjectWithContext) | | | | |
+| | [`GetObjectWithContext`](#GetObjectWithContext) | | | | |
+| | [`FPutObjectWithContext`](#FPutObjectWithContext) | | | | |
+| | [`FGetObjectWithContext`](#FGetObjectWithContext) | | | | |
+## 1. 构造函数
+<a name="Minio"></a>
+
+### New(endpoint, accessKeyID, secretAccessKey string, ssl bool) (*Client, error)
+初使化一个新的client对象。
+
+__参数__
+
+|参数 | 类型 |描述 |
+|:---|:---| :---|
+|`endpoint` | _string_ |S3兼容对象存储服务endpoint |
+|`accessKeyID` |_string_ |对象存储的Access key |
+|`secretAccessKey` | _string_ |对象存储的Secret key |
+|`ssl` | _bool_ |true代表使用HTTPS |
+
+### NewWithRegion(endpoint, accessKeyID, secretAccessKey string, ssl bool, region string) (*Client, error)
+初使化minio client,带有region配置。和New()不同的是,NewWithRegion避免了bucket-location操作,所以会快那么一丢丢。如果你的应用只使用一个region的话可以用这个方法。
+
+__参数__
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`endpoint` | _string_ |S3兼容对象存储服务endpoint |
+|`accessKeyID` |_string_ |对象存储的Access key |
+|`secretAccessKey` | _string_ |对象存储的Secret key |
+|`ssl` | _bool_ |true代表使用HTTPS |
+|`region`| _string_ | 对象存储的region |
+
+## 2. 操作存储桶
+
+<a name="MakeBucket"></a>
+### MakeBucket(bucketName, location string) error
+创建一个存储桶。
+
+__参数__
+
+| 参数 | 类型 | 描述 |
+|---|---|---|
+|`bucketName` | _string_ | 存储桶名称 |
+| `location` | _string_ | 存储桶被创建的region(地区),默认是us-east-1(美国东一区),下面列举的是其它合法的值。注意:如果用的是minio服务的话,region是在它的配置文件中,(默认是us-east-1)。|
+| | |us-east-1 |
+| | |us-west-1 |
+| | |us-west-2 |
+| | |eu-west-1 |
+| | | eu-central-1|
+| | | ap-southeast-1|
+| | | ap-northeast-1|
+| | | ap-southeast-2|
+| | | sa-east-1|
+
+
+__示例__
+
+
+```go
+err = minioClient.MakeBucket("mybucket", "us-east-1")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully created mybucket.")
+```
+
+<a name="ListBuckets"></a>
+### ListBuckets() ([]BucketInfo, error)
+列出所有的存储桶。
+
+| 参数 | 类型 | 描述 |
+|---|---|---|
+|`bucketList` | _[]minio.BucketInfo_ | 所有存储桶的list。 |
+
+
+__minio.BucketInfo__
+
+| 参数 | 类型 | 描述 |
+|---|---|---|
+|`bucket.Name` | _string_ | 存储桶名称 |
+|`bucket.CreationDate` | _time.Time_ | 存储桶的创建时间 |
+
+
+__示例__
+
+
+```go
+buckets, err := minioClient.ListBuckets()
+if err != nil {
+ fmt.Println(err)
+ return
+}
+for _, bucket := range buckets {
+ fmt.Println(bucket)
+}
+```
+
+<a name="BucketExists"></a>
+### BucketExists(bucketName string) (found bool, err error)
+检查存储桶是否存在。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+
+
+__返回值__
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`found` | _bool_ | 存储桶是否存在 |
+|`err` | _error_ | 标准Error |
+
+
+__示例__
+
+
+```go
+found, err := minioClient.BucketExists("mybucket")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+if found {
+ fmt.Println("Bucket found")
+}
+```
+
+<a name="RemoveBucket"></a>
+### RemoveBucket(bucketName string) error
+删除一个存储桶,存储桶必须为空才能被成功删除。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+
+__示例__
+
+
+```go
+err = minioClient.RemoveBucket("mybucket")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+<a name="ListObjects"></a>
+### ListObjects(bucketName, prefix string, recursive bool, doneCh chan struct{}) <-chan ObjectInfo
+列举存储桶里的对象。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`objectPrefix` |_string_ | 要列举的对象前缀 |
+|`recursive` | _bool_ |`true`代表递归查找,`false`代表类似文件夹查找,以'/'分隔,不查子文件夹。 |
+|`doneCh` | _chan struct{}_ | 在该channel上结束ListObjects iterator的一个message。 |
+
+
+__返回值__
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`objectInfo` | _chan minio.ObjectInfo_ |存储桶中所有对象的read channel,对象的格式如下: |
+
+__minio.ObjectInfo__
+
+|属性 |类型 |描述 |
+|:---|:---| :---|
+|`objectInfo.Key` | _string_ |对象的名称 |
+|`objectInfo.Size` | _int64_ |对象的大小 |
+|`objectInfo.ETag` | _string_ |对象的MD5校验码 |
+|`objectInfo.LastModified` | _time.Time_ |对象的最后修改时间 |
+
+
+```go
+// Create a done channel to control 'ListObjects' go routine.
+doneCh := make(chan struct{})
+
+// Indicate to our routine to exit cleanly upon return.
+defer close(doneCh)
+
+isRecursive := true
+objectCh := minioClient.ListObjects("mybucket", "myprefix", isRecursive, doneCh)
+for object := range objectCh {
+ if object.Err != nil {
+ fmt.Println(object.Err)
+ return
+ }
+ fmt.Println(object)
+}
+```
+
+
+<a name="ListObjectsV2"></a>
+### ListObjectsV2(bucketName, prefix string, recursive bool, doneCh chan struct{}) <-chan ObjectInfo
+使用listing API v2版本列举存储桶中的对象。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+| `objectPrefix` |_string_ | 要列举的对象前缀 |
+| `recursive` | _bool_ |`true`代表递归查找,`false`代表类似文件夹查找,以'/'分隔,不查子文件夹。 |
+|`doneCh` | _chan struct{}_ | 在该channel上结束ListObjects iterator的一个message。 |
+
+
+__返回值__
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`objectInfo` | _chan minio.ObjectInfo_ |存储桶中所有对象的read channel |
+
+
+```go
+// Create a done channel to control 'ListObjectsV2' go routine.
+doneCh := make(chan struct{})
+
+// Indicate to our routine to exit cleanly upon return.
+defer close(doneCh)
+
+isRecursive := true
+objectCh := minioClient.ListObjectsV2("mybucket", "myprefix", isRecursive, doneCh)
+for object := range objectCh {
+ if object.Err != nil {
+ fmt.Println(object.Err)
+ return
+ }
+ fmt.Println(object)
+}
+```
+
+<a name="ListIncompleteUploads"></a>
+### ListIncompleteUploads(bucketName, prefix string, recursive bool, doneCh chan struct{}) <- chan ObjectMultipartInfo
+列举存储桶中未完整上传的对象。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+| `prefix` |_string_ | 不完整上传的对象的前缀 |
+| `recursive` | _bool_ |`true`代表递归查找,`false`代表类似文件夹查找,以'/'分隔,不查子文件夹。 |
+|`doneCh` | _chan struct{}_ | 在该channel上结束ListIncompleteUploads iterator的一个message。 |
+
+
+__返回值__
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`multiPartInfo` | _chan minio.ObjectMultipartInfo_ |multipart对象格式如下: |
+
+__minio.ObjectMultipartInfo__
+
+|属性 |类型 |描述 |
+|:---|:---| :---|
+|`multiPartObjInfo.Key` | _string_ |未完整上传的对象的名称 |
+|`multiPartObjInfo.UploadID` | _string_ |未完整上传的对象的Upload ID |
+|`multiPartObjInfo.Size` | _int64_ |未完整上传的对象的大小 |
+
+__示例__
+
+
+```go
+// Create a done channel to control 'ListObjects' go routine.
+doneCh := make(chan struct{})
+
+// Indicate to our routine to exit cleanly upon return.
+defer close(doneCh)
+
+isRecursive := true // Recursively list everything at 'myprefix'
+multiPartObjectCh := minioClient.ListIncompleteUploads("mybucket", "myprefix", isRecursive, doneCh)
+for multiPartObject := range multiPartObjectCh {
+ if multiPartObject.Err != nil {
+ fmt.Println(multiPartObject.Err)
+ return
+ }
+ fmt.Println(multiPartObject)
+}
+```
+
+## 3. 操作对象
+
+<a name="GetObject"></a>
+### GetObject(bucketName, objectName string, opts GetObjectOptions) (*Object, error)
+返回对象数据的流,error是读流时经常抛的那些错。
+
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`objectName` | _string_ |对象的名称 |
+|`opts` | _minio.GetObjectOptions_ | GET请求的一些额外参数,像encryption,If-Match |
+
+
+__minio.GetObjectOptions__
+
+|参数 | 类型 | 描述 |
+|:---|:---|:---|
+| `opts.Materials` | _encrypt.Materials_ | `encrypt`包提供的对流加密的接口,(更多信息,请看https://godoc.org/github.com/minio/minio-go) |
+
+__返回值__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`object` | _*minio.Object_ |_minio.Object_代表了一个object reader。它实现了io.Reader, io.Seeker, io.ReaderAt and io.Closer接口。 |
+
+
+__示例__
+
+
+```go
+object, err := minioClient.GetObject("mybucket", "myobject", minio.GetObjectOptions{})
+if err != nil {
+ fmt.Println(err)
+ return
+}
+localFile, err := os.Create("/tmp/local-file.jpg")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+if _, err = io.Copy(localFile, object); err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+<a name="FGetObject"></a>
+### FGetObject(bucketName, objectName, filePath string, opts GetObjectOptions) error
+下载并将文件保存到本地文件系统。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`objectName` | _string_ |对象的名称 |
+|`filePath` | _string_ |下载后保存的路径 |
+|`opts` | _minio.GetObjectOptions_ | GET请求的一些额外参数,像encryption,If-Match |
+
+
+__示例__
+
+
+```go
+err = minioClient.FGetObject("mybucket", "myobject", "/tmp/myobject", minio.GetObjectOptions{})
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+<a name="GetObjectWithContext"></a>
+### GetObjectWithContext(ctx context.Context, bucketName, objectName string, opts GetObjectOptions) (*Object, error)
+和GetObject操作是一样的,不过传入了取消请求的context。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`ctx` | _context.Context_ |请求上下文(Request context) |
+|`bucketName` | _string_ |存储桶名称 |
+|`objectName` | _string_ |对象的名称 |
+|`opts` | _minio.GetObjectOptions_ | GET请求的一些额外参数,像encryption,If-Match |
+
+
+__返回值__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`object` | _*minio.Object_ |_minio.Object_代表了一个object reader。它实现了io.Reader, io.Seeker, io.ReaderAt and io.Closer接口。 |
+
+
+__示例__
+
+
+```go
+ctx, cancel := context.WithTimeout(context.Background(), 100 * time.Second)
+defer cancel()
+
+object, err := minioClient.GetObjectWithContext(ctx, "mybucket", "myobject", minio.GetObjectOptions{})
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+localFile, err := os.Create("/tmp/local-file.jpg")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+if _, err = io.Copy(localFile, object); err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+<a name="FGetObjectWithContext"></a>
+### FGetObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts GetObjectOptions) error
+和FGetObject操作是一样的,不过允许取消请求。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`ctx` | _context.Context_ |请求上下文 |
+|`bucketName` | _string_ |存储桶名称 |
+|`objectName` | _string_ |对象的名称 |
+|`filePath` | _string_ |下载后保存的路径 |
+|`opts` | _minio.GetObjectOptions_ | GET请求的一些额外参数,像encryption,If-Match |
+
+
+__示例__
+
+
+```go
+ctx, cancel := context.WithTimeout(context.Background(), 100 * time.Second)
+defer cancel()
+
+err = minioClient.FGetObjectWithContext(ctx, "mybucket", "myobject", "/tmp/myobject", minio.GetObjectOptions{})
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+<a name="FGetEncryptedObject"></a>
+### FGetEncryptedObject(bucketName, objectName, filePath string, materials encrypt.Materials) error
+和FGetObject操作是一样的,不过会对加密请求进行解密。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`objectName` | _string_ |对象的名称 |
+|`filePath` | _string_ |下载后保存的路径|
+|`materials` | _encrypt.Materials_ | `encrypt`包提供的对流加密的接口,(更多信息,请看https://godoc.org/github.com/minio/minio-go) |
+
+
+__示例__
+
+
+```go
+// Generate a master symmetric key
+key := encrypt.NewSymmetricKey([]byte("my-secret-key-00"))
+
+// Build the CBC encryption material
+cbcMaterials, err := encrypt.NewCBCSecureMaterials(key)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+err = minioClient.FGetEncryptedObject("mybucket", "myobject", "/tmp/myobject", cbcMaterials)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+<a name="PutObject"></a>
+### PutObject(bucketName, objectName string, reader io.Reader, objectSize int64, opts PutObjectOptions) (n int64, err error)
+当对象小于64MiB时,直接在一次PUT请求里进行上传。当大于64MiB时,根据文件的实际大小,PutObject会自动地将对象进行拆分成64MiB一块或更大一些进行上传。对象的最大大小是5TB。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`objectName` | _string_ |对象的名称 |
+|`reader` | _io.Reader_ |任意实现了io.Reader的GO类型 |
+|`objectSize`| _int64_ |上传的对象的大小,-1代表未知。 |
+|`opts` | _minio.PutObjectOptions_ | 允许用户设置可选的自定义元数据,内容标题,加密密钥和用于分段上传操作的线程数量。 |
+
+__minio.PutObjectOptions__
+
+|属性 | 类型 | 描述 |
+|:--- |:--- | :--- |
+| `opts.UserMetadata` | _map[string]string_ | 用户元数据的Map|
+| `opts.Progress` | _io.Reader_ | 获取上传进度的Reader |
+| `opts.ContentType` | _string_ | 对象的Content type, 例如"application/text" |
+| `opts.ContentEncoding` | _string_ | 对象的Content encoding,例如"gzip" |
+| `opts.ContentDisposition` | _string_ | 对象的Content disposition, "inline" |
+| `opts.CacheControl` | _string_ | 指定针对请求和响应的缓存机制,例如"max-age=600"|
+| `opts.EncryptMaterials` | _encrypt.Materials_ | `encrypt`包提供的对流加密的接口,(更多信息,请看https://godoc.org/github.com/minio/minio-go) |
+
+
+__示例__
+
+
+```go
+file, err := os.Open("my-testfile")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+defer file.Close()
+
+fileStat, err := file.Stat()
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+n, err := minioClient.PutObject("mybucket", "myobject", file, fileStat.Size(), minio.PutObjectOptions{ContentType:"application/octet-stream"})
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully uploaded bytes: ", n)
+```
+
+API方法在minio-go SDK版本v3.0.3中提供的PutObjectWithSize,PutObjectWithMetadata,PutObjectStreaming和PutObjectWithProgress被替换为接受指向PutObjectOptions struct的指针的新的PutObject调用变体。
+
+<a name="PutObjectWithContext"></a>
+### PutObjectWithContext(ctx context.Context, bucketName, objectName string, reader io.Reader, objectSize int64, opts PutObjectOptions) (n int64, err error)
+和PutObject是一样的,不过允许取消请求。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`ctx` | _context.Context_ |请求上下文 |
+|`bucketName` | _string_ |存储桶名称 |
+|`objectName` | _string_ |对象的名称 |
+|`reader` | _io.Reader_ |任何实现io.Reader的Go类型 |
+|`objectSize`| _int64_ | 上传的对象的大小,-1代表未知 |
+|`opts` | _minio.PutObjectOptions_ |允许用户设置可选的自定义元数据,content-type,content-encoding,content-disposition以及cache-control headers,传递加密模块以加密对象,并可选地设置multipart put操作的线程数量。|
+
+
+__示例__
+
+
+```go
+ctx, cancel := context.WithTimeout(context.Background(), 10 * time.Second)
+defer cancel()
+
+file, err := os.Open("my-testfile")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+defer file.Close()
+
+fileStat, err := file.Stat()
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+n, err := minioClient.PutObjectWithContext(ctx, "my-bucketname", "my-objectname", file, fileStat.Size(), minio.PutObjectOptions{
+ ContentType: "application/octet-stream",
+})
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully uploaded bytes: ", n)
+```
+
+<a name="CopyObject"></a>
+### CopyObject(dst DestinationInfo, src SourceInfo) error
+通过在服务端对已存在的对象进行拷贝,实现新建或者替换对象。它支持有条件的拷贝,拷贝对象的一部分,以及在服务端的加解密。请查看`SourceInfo`和`DestinationInfo`两个类型来了解更多细节。
+
+拷贝多个源文件到一个目标对象,请查看`ComposeObject` API。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`dst` | _minio.DestinationInfo_ |目标对象 |
+|`src` | _minio.SourceInfo_ |源对象 |
+
+
+__示例__
+
+
+```go
+// Use-case 1: Simple copy object with no conditions.
+// Source object
+src := minio.NewSourceInfo("my-sourcebucketname", "my-sourceobjectname", nil)
+
+// Destination object
+dst, err := minio.NewDestinationInfo("my-bucketname", "my-objectname", nil, nil)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Copy object call
+err = minioClient.CopyObject(dst, src)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+```go
+// Use-case 2:
+// Copy object with copy-conditions, and copying only part of the source object.
+// 1. that matches a given ETag
+// 2. and modified after 1st April 2014
+// 3. but unmodified since 23rd April 2014
+// 4. copy only first 1MiB of object.
+
+// Source object
+src := minio.NewSourceInfo("my-sourcebucketname", "my-sourceobjectname", nil)
+
+// Set matching ETag condition, copy object which matches the following ETag.
+src.SetMatchETagCond("31624deb84149d2f8ef9c385918b653a")
+
+// Set modified condition, copy object modified since 2014 April 1.
+src.SetModifiedSinceCond(time.Date(2014, time.April, 1, 0, 0, 0, 0, time.UTC))
+
+// Set unmodified condition, copy object unmodified since 2014 April 23.
+src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 23, 0, 0, 0, 0, time.UTC))
+
+// Set copy-range of only first 1MiB of file.
+src.SetRange(0, 1024*1024-1)
+
+// Destination object
+dst, err := minio.NewDestinationInfo("my-bucketname", "my-objectname", nil, nil)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Copy object call
+err = minioClient.CopyObject(dst, src)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+<a name="ComposeObject"></a>
+### ComposeObject(dst minio.DestinationInfo, srcs []minio.SourceInfo) error
+通过使用服务端拷贝实现将多个源对象合并创建成一个新的对象。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---|:---|
+|`dst` | _minio.DestinationInfo_ |要被创建的目标对象 |
+|`srcs` | _[]minio.SourceInfo_ |要合并的多个源对象 |
+
+
+__示例__
+
+
+```go
+// Prepare source decryption key (here we assume same key to
+// decrypt all source objects.)
+decKey := minio.NewSSEInfo([]byte{1, 2, 3}, "")
+
+// Source objects to concatenate. We also specify decryption
+// key for each
+src1 := minio.NewSourceInfo("bucket1", "object1", &decKey)
+src1.SetMatchETagCond("31624deb84149d2f8ef9c385918b653a")
+
+src2 := minio.NewSourceInfo("bucket2", "object2", &decKey)
+src2.SetMatchETagCond("f8ef9c385918b653a31624deb84149d2")
+
+src3 := minio.NewSourceInfo("bucket3", "object3", &decKey)
+src3.SetMatchETagCond("5918b653a31624deb84149d2f8ef9c38")
+
+// Create slice of sources.
+srcs := []minio.SourceInfo{src1, src2, src3}
+
+// Prepare destination encryption key
+encKey := minio.NewSSEInfo([]byte{8, 9, 0}, "")
+
+// Create destination info
+dst, err := minio.NewDestinationInfo("bucket", "object", &encKey, nil)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Compose object call by concatenating multiple source files.
+err = minioClient.ComposeObject(dst, srcs)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+fmt.Println("Composed object successfully.")
+```
+
+<a name="NewSourceInfo"></a>
+### NewSourceInfo(bucket, object string, decryptSSEC *SSEInfo) SourceInfo
+构建一个可用于服务端拷贝操作(像`CopyObject`和`ComposeObject`)的`SourceInfo`对象。该对象可用于给源对象设置拷贝条件。
+
+__参数__
+
+| 参数 | 类型 | 描述 |
+| :--- | :--- | :--- |
+| `bucket` | _string_ | 源存储桶 |
+| `object` | _string_ | 源对象 |
+| `decryptSSEC` | _*minio.SSEInfo_ | 源对象的解密信息 (`nil`代表不用解密) |
+
+__示例__
+
+```go
+// No decryption parameter.
+src := minio.NewSourceInfo("bucket", "object", nil)
+
+// Destination object
+dst, err := minio.NewDestinationInfo("my-bucketname", "my-objectname", nil, nil)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Copy object call
+err = minioClient.CopyObject(dst, src)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+```go
+// With decryption parameter.
+decKey := minio.NewSSEInfo([]byte{1,2,3}, "")
+src := minio.NewSourceInfo("bucket", "object", &decKey)
+
+// Destination object
+dst, err := minio.NewDestinationInfo("my-bucketname", "my-objectname", nil, nil)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Copy object call
+err = minioClient.CopyObject(dst, src)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+<a name="NewDestinationInfo"></a>
+### NewDestinationInfo(bucket, object string, encryptSSEC *SSEInfo, userMeta map[string]string) (DestinationInfo, error)
+构建一个用于服务端拷贝操作(像`CopyObject`和`ComposeObject`)的用作目标对象的`DestinationInfo`。
+
+__参数__
+
+| 参数 | 类型 | 描述 |
+| :--- | :--- | :--- |
+| `bucket` | _string_ | 目标存储桶名称 |
+| `object` | _string_ | 目标对象名称 |
+| `encryptSSEC` | _*minio.SSEInfo_ | 源对象的加密信息 (`nil`代表不用加密) |
+| `userMeta` | _map[string]string_ | 给目标对象的用户元数据,如果是nil,并只有一个源对象,则将源对象的用户元数据拷贝给目标对象。|
+
+__示例__
+
+```go
+// No encryption parameter.
+src := minio.NewSourceInfo("bucket", "object", nil)
+dst, err := minio.NewDestinationInfo("bucket", "object", nil, nil)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Copy object call
+err = minioClient.CopyObject(dst, src)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+```go
+src := minio.NewSourceInfo("bucket", "object", nil)
+
+// With encryption parameter.
+encKey := minio.NewSSEInfo([]byte{1,2,3}, "")
+dst, err := minio.NewDestinationInfo("bucket", "object", &encKey, nil)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Copy object call
+err = minioClient.CopyObject(dst, src)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+<a name="FPutObject"></a>
+### FPutObject(bucketName, objectName, filePath string, opts PutObjectOptions) (length int64, err error)
+将filePath对应的文件内容上传到一个对象中。
+
+当对象小于64MiB时,FPutObject直接在一次PUT请求里进行上传。当大于64MiB时,根据文件的实际大小,FPutObject会自动地将对象进行拆分成64MiB一块或更大一些进行上传。对象的最大大小是5TB。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`objectName` | _string_ |对象的名称 |
+|`filePath` | _string_ |要上传的文件的路径 |
+|`opts` | _minio.PutObjectOptions_ |允许用户设置可选的自定义元数据,content-type,content-encoding,content-disposition以及cache-control headers,传递加密模块以加密对象,并可选地设置multipart put操作的线程数量。 |
+
+
+__示例__
+
+
+```go
+n, err := minioClient.FPutObject("my-bucketname", "my-objectname", "my-filename.csv", minio.PutObjectOptions{
+ ContentType: "application/csv",
+});
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully uploaded bytes: ", n)
+```
+
+<a name="FPutObjectWithContext"></a>
+### FPutObjectWithContext(ctx context.Context, bucketName, objectName, filePath string, opts PutObjectOptions) (length int64, err error)
+和FPutObject操作是一样的,不过允许取消请求。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`ctx` | _context.Context_ |请求上下文 |
+|`bucketName` | _string_ |存储桶名称 |
+|`objectName` | _string_ |对象的名称 |
+|`filePath` | _string_ |要上传的文件的路径 |
+|`opts` | _minio.PutObjectOptions_ |允许用户设置可选的自定义元数据,content-type,content-encoding,content-disposition以及cache-control headers,传递加密模块以加密对象,并可选地设置multipart put操作的线程数量。 |
+
+__示例__
+
+
+```go
+ctx, cancel := context.WithTimeout(context.Background(), 100 * time.Second)
+defer cancel()
+
+n, err := minioClient.FPutObjectWithContext(ctx, "mybucket", "myobject.csv", "/tmp/otherobject.csv", minio.PutObjectOptions{ContentType:"application/csv"})
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully uploaded bytes: ", n)
+```
+
+<a name="StatObject"></a>
+### StatObject(bucketName, objectName string, opts StatObjectOptions) (ObjectInfo, error)
+获取对象的元数据。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`objectName` | _string_ |对象的名称 |
+|`opts` | _minio.StatObjectOptions_ | GET info/stat请求的一些额外参数,像encryption,If-Match |
+
+
+__返回值__
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`objInfo` | _minio.ObjectInfo_ |对象stat信息 |
+
+
+__minio.ObjectInfo__
+
+|属性 |类型 |描述 |
+|:---|:---| :---|
+|`objInfo.LastModified` | _time.Time_ |对象的最后修改时间 |
+|`objInfo.ETag` | _string_ |对象的MD5校验码|
+|`objInfo.ContentType` | _string_ |对象的Content type|
+|`objInfo.Size` | _int64_ |对象的大小|
+
+
+__示例__
+
+
+```go
+objInfo, err := minioClient.StatObject("mybucket", "myobject", minio.StatObjectOptions{})
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println(objInfo)
+```
+
+<a name="RemoveObject"></a>
+### RemoveObject(bucketName, objectName string) error
+删除一个对象。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`objectName` | _string_ |对象的名称 |
+
+
+```go
+err = minioClient.RemoveObject("mybucket", "myobject")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+<a name="RemoveObjects"></a>
+### RemoveObjects(bucketName string, objectsCh chan string) (errorCh <-chan RemoveObjectError)
+
+从一个input channel里删除一个对象集合。一次发送到服务端的删除请求最多可删除1000个对象。错误信息通过error channel返回。
+
+__参数__
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`objectsCh` | _chan string_ | 要删除的对象的channel |
+
+
+__返回值__
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`errorCh` | _<-chan minio.RemoveObjectError_ | 删除时观察到的错误的Receive-only channel。 |
+
+
+```go
+objectsCh := make(chan string)
+
+// Send object names that are needed to be removed to objectsCh
+go func() {
+ defer close(objectsCh)
+ // List all objects from a bucket-name with a matching prefix.
+ for object := range minioClient.ListObjects("my-bucketname", "my-prefixname", true, nil) {
+ if object.Err != nil {
+ log.Fatalln(object.Err)
+ }
+ objectsCh <- object.Key
+ }
+}()
+
+for rErr := range minioClient.RemoveObjects("mybucket", objectsCh) {
+ fmt.Println("Error detected during deletion: ", rErr)
+}
+```
+
+<a name="RemoveIncompleteUpload"></a>
+### RemoveIncompleteUpload(bucketName, objectName string) error
+删除一个未完整上传的对象。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`objectName` | _string_ |对象的名称 |
+
+__示例__
+
+
+```go
+err = minioClient.RemoveIncompleteUpload("mybucket", "myobject")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+## 4. 操作加密对象
+
+<a name="NewSymmetricKey"></a>
+### NewSymmetricKey(key []byte) *encrypt.SymmetricKey
+
+__参数__
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`key` | _[]byte_ |用于加密和解密的对称密钥数据 |
+
+
+__返回值__
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`symmetricKey` | _*encrypt.SymmetricKey_ | 加密解密的对称秘钥 |
+
+```go
+symKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00"))
+
+// Build the CBC encryption material with symmetric key.
+cbcMaterials, err := encrypt.NewCBCSecureMaterials(symKey)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully initialized Symmetric key CBC materials", cbcMaterials)
+
+object, err := minioClient.GetEncryptedObject("mybucket", "myobject", cbcMaterials)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+defer object.Close()
+```
+
+<a name="NewAsymmetricKey"></a>
+### NewAsymmetricKey(privateKey []byte, publicKey[]byte) (*encrypt.AsymmetricKey, error)
+
+__参数__
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`privateKey` | _[]byte_ | Private key数据 |
+|`publicKey` | _[]byte_ | Public key数据 |
+
+
+__返回值__
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`asymmetricKey` | _*encrypt.AsymmetricKey_ | 加密解密的非对称秘钥 |
+|`err` | _error_ | 标准Error |
+
+
+```go
+privateKey, err := ioutil.ReadFile("private.key")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+publicKey, err := ioutil.ReadFile("public.key")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Initialize the asymmetric key
+asymmetricKey, err := encrypt.NewAsymmetricKey(privateKey, publicKey)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Build the CBC encryption material for asymmetric key.
+cbcMaterials, err := encrypt.NewCBCSecureMaterials(asymmetricKey)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully initialized Asymmetric key CBC materials", cbcMaterials)
+
+object, err := minioClient.GetEncryptedObject("mybucket", "myobject", cbcMaterials)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+defer object.Close()
+```
+
+<a name="GetEncryptedObject"></a>
+### GetEncryptedObject(bucketName, objectName string, encryptMaterials encrypt.Materials) (io.ReadCloser, error)
+
+返回对象的解密流。读取该流时可能返回常见的读取错误。
+
+__参数__
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ | 存储桶名称 |
+|`objectName` | _string_ | 对象的名称 |
+|`encryptMaterials` | _encrypt.Materials_ | `encrypt`包提供的对流加密的接口,(更多信息,请看https://godoc.org/github.com/minio/minio-go) |
+
+
+__返回值__
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`stream` | _io.ReadCloser_ | 返回对象的reader,调用者需要在读取之后进行关闭。 |
+|`err` | _error_ | 错误信息 |
+
+
+__示例__
+
+
+```go
+// Generate a master symmetric key
+key := encrypt.NewSymmetricKey([]byte("my-secret-key-00"))
+
+// Build the CBC encryption material
+cbcMaterials, err := encrypt.NewCBCSecureMaterials(key)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+object, err := minioClient.GetEncryptedObject("mybucket", "myobject", cbcMaterials)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+defer object.Close()
+
+localFile, err := os.Create("/tmp/local-file.jpg")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+defer localFile.Close()
+
+if _, err = io.Copy(localFile, object); err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+<a name="PutEncryptedObject"></a>
+
+### PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials encrypt.Materials) (n int, err error)
+加密并上传对象。
+
+__参数__
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`objectName` | _string_ |对象的名称 |
+|`reader` | _io.Reader_ |任何实现io.Reader的Go类型 |
+|`encryptMaterials` | _encrypt.Materials_ | `encrypt`包提供的对流加密的接口,(更多信息,请看https://godoc.org/github.com/minio/minio-go) |
+
+__示例__
+
+```go
+// Load a private key
+privateKey, err := ioutil.ReadFile("private.key")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Load a public key
+publicKey, err := ioutil.ReadFile("public.key")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Build an asymmetric key
+key, err := encrypt.NewAsymmetricKey(privateKey, publicKey)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Build the CBC encryption module
+cbcMaterials, err := encrypt.NewCBCSecureMaterials(key)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Open a file to upload
+file, err := os.Open("my-testfile")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+defer file.Close()
+
+// Upload the encrypted form of the file
+n, err := minioClient.PutEncryptedObject("mybucket", "myobject", file, cbcMaterials)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully uploaded encrypted bytes: ", n)
+```
+
+<a name="FPutEncryptedObject"></a>
+### FPutEncryptedObject(bucketName, objectName, filePath, encryptMaterials encrypt.Materials) (n int, err error)
+通过一个文件进行加密并上传到对象。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`objectName` | _string_ |对象的名称 |
+|`filePath` | _string_ |要上传的文件的路径 |
+|`encryptMaterials` | _encrypt.Materials_ | `encrypt`包提供的对流加密的接口,(更多信息,请看https://godoc.org/github.com/minio/minio-go) |
+
+__示例__
+
+
+```go
+// Load a private key
+privateKey, err := ioutil.ReadFile("private.key")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Load a public key
+publicKey, err := ioutil.ReadFile("public.key")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Build an asymmetric key
+key, err := encrypt.NewAsymmetricKey(privateKey, publicKey)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// Build the CBC encryption module
+cbcMaterials, err := encrypt.NewCBCSecureMaterials(key)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+n, err := minioClient.FPutEncryptedObject("mybucket", "myobject.csv", "/tmp/otherobject.csv", cbcMaterials)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully uploaded encrypted bytes: ", n)
+```
+
+<a name="NewSSEInfo"></a>
+
+### NewSSEInfo(key []byte, algo string) SSEInfo
+创建一个通过用户提供的key(SSE-C),进行服务端加解密操作的key对象。
+
+__参数__
+
+| 参数 | 类型 | 描述 |
+| :--- | :--- | :--- |
+| `key` | _[]byte_ | 未编码的二进制key数组 |
+| `algo` | _string_ | 加密算法,可以为空(默认是`AES256`) |
+
+
+## 5. Presigned操作
+
+<a name="PresignedGetObject"></a>
+### PresignedGetObject(bucketName, objectName string, expiry time.Duration, reqParams url.Values) (*url.URL, error)
+生成一个用于HTTP GET操作的presigned URL。浏览器/移动客户端可以在即使存储桶为私有的情况下也可以通过这个URL进行下载。这个presigned URL可以有一个过期时间,默认是7天。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`objectName` | _string_ |对象的名称 |
+|`expiry` | _time.Duration_ |presigned URL的过期时间,单位是秒 |
+|`reqParams` | _url.Values_ |额外的响应头,支持_response-expires_, _response-content-type_, _response-cache-control_, _response-content-disposition_。 |
+
+
+__示例__
+
+
+```go
+// Set request parameters for content-disposition.
+reqParams := make(url.Values)
+reqParams.Set("response-content-disposition", "attachment; filename=\"your-filename.txt\"")
+
+// Generates a presigned url which expires in a day.
+presignedURL, err := minioClient.PresignedGetObject("mybucket", "myobject", time.Second * 24 * 60 * 60, reqParams)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully generated presigned URL", presignedURL)
+```
+
+<a name="PresignedPutObject"></a>
+### PresignedPutObject(bucketName, objectName string, expiry time.Duration) (*url.URL, error)
+生成一个用于HTTP PUT操作的presigned URL。浏览器/移动客户端可以在即使存储桶为私有的情况下也可以通过这个URL进行上传。这个presigned URL可以有一个过期时间,默认是7天。
+
+注意:你可以通过只指定对象名称上传到S3。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`objectName` | _string_ |对象的名称 |
+|`expiry` | _time.Duration_ |presigned URL的过期时间,单位是秒 |
+
+
+__示例__
+
+
+```go
+// Generates a url which expires in a day.
+expiry := time.Second * 24 * 60 * 60 // 1 day.
+presignedURL, err := minioClient.PresignedPutObject("mybucket", "myobject", expiry)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully generated presigned URL", presignedURL)
+```
+
+<a name="PresignedHeadObject"></a>
+### PresignedHeadObject(bucketName, objectName string, expiry time.Duration, reqParams url.Values) (*url.URL, error)
+生成一个用于HTTP HEAD操作的presigned URL。浏览器/移动客户端可以在即使存储桶为私有的情况下也可以通过这个URL获取对象的元数据。这个presigned URL可以有一个过期时间,默认是7天。
+
+__参数__
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`objectName` | _string_ |对象的名称 |
+|`expiry` | _time.Duration_ |presigned URL的过期时间,单位是秒 |
+|`reqParams` | _url.Values_ |额外的响应头,支持_response-expires_, _response-content-type_, _response-cache-control_, _response-content-disposition_。 |
+
+
+__示例__
+
+
+```go
+// Set request parameters for content-disposition.
+reqParams := make(url.Values)
+reqParams.Set("response-content-disposition", "attachment; filename=\"your-filename.txt\"")
+
+// Generates a presigned url which expires in a day.
+presignedURL, err := minioClient.PresignedHeadObject("mybucket", "myobject", time.Second * 24 * 60 * 60, reqParams)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Successfully generated presigned URL", presignedURL)
+```
+
+<a name="PresignedPostPolicy"></a>
+### PresignedPostPolicy(PostPolicy) (*url.URL, map[string]string, error)
+允许给POST操作的presigned URL设置策略条件。这些策略包括比如,接收对象上传的存储桶名称,名称前缀,过期策略。
+
+```go
+// Initialize policy condition config.
+policy := minio.NewPostPolicy()
+
+// Apply upload policy restrictions:
+policy.SetBucket("mybucket")
+policy.SetKey("myobject")
+policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
+
+// Only allow 'png' images.
+policy.SetContentType("image/png")
+
+// Only allow content size in range 1KB to 1MB.
+policy.SetContentLengthRange(1024, 1024*1024)
+
+// Add a user metadata using the key "custom" and value "user"
+policy.SetUserMetadata("custom", "user")
+
+// Get the POST form key/value object:
+url, formData, err := minioClient.PresignedPostPolicy(policy)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+
+// POST your content from the command line using `curl`
+fmt.Printf("curl ")
+for k, v := range formData {
+ fmt.Printf("-F %s=%s ", k, v)
+}
+fmt.Printf("-F file=@/etc/bash.bashrc ")
+fmt.Printf("%s\n", url)
+```
+
+## 6. 存储桶策略/通知
+
+<a name="SetBucketPolicy"></a>
+### SetBucketPolicy(bucketname, objectPrefix string, policy policy.BucketPolicy) error
+给存储桶或者对象前缀设置访问权限。
+
+必须引入`github.com/minio/minio-go/pkg/policy`包。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称|
+|`objectPrefix` | _string_ |对象的名称前缀|
+|`policy` | _policy.BucketPolicy_ |Policy的取值如下: |
+| | | _policy.BucketPolicyNone_ |
+| | | _policy.BucketPolicyReadOnly_ |
+| | | _policy.BucketPolicyReadWrite_ |
+| | | _policy.BucketPolicyWriteOnly_ |
+
+
+__返回值__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`err` | _error_ |标准Error |
+
+
+__示例__
+
+
+```go
+// Sets 'mybucket' with a sub-directory 'myprefix' to be anonymously accessible for
+// both read and write operations.
+err = minioClient.SetBucketPolicy("mybucket", "myprefix", policy.BucketPolicyReadWrite)
+if err != nil {
+ fmt.Println(err)
+ return
+}
+```
+
+<a name="GetBucketPolicy"></a>
+### GetBucketPolicy(bucketName, objectPrefix string) (policy.BucketPolicy, error)
+获取存储桶或者对象前缀的访问权限。
+
+必须引入`github.com/minio/minio-go/pkg/policy`包。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`objectPrefix` | _string_ |该存储桶下的对象前缀 |
+
+__返回值__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketPolicy` | _policy.BucketPolicy_ |取值如下: `none`, `readonly`, `readwrite`,或者`writeonly` |
+|`err` | _error_ |标准Error |
+
+__示例__
+
+
+```go
+bucketPolicy, err := minioClient.GetBucketPolicy("mybucket", "")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+fmt.Println("Access permissions for mybucket is", bucketPolicy)
+```
+
+<a name="ListBucketPolicies"></a>
+### ListBucketPolicies(bucketName, objectPrefix string) (map[string]BucketPolicy, error)
+获取指定的存储桶和前缀的访问策略。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`objectPrefix` | _string_ |该存储桶下的对象前缀 |
+
+__返回值__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketPolicies` | _map[string]minio.BucketPolicy_ |对象以及它们的权限的Map |
+|`err` | _error_ |标准Error |
+
+__示例__
+
+
+```go
+bucketPolicies, err := minioClient.ListBucketPolicies("mybucket", "")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+for resource, permission := range bucketPolicies {
+ fmt.Println(resource, " => ", permission)
+}
+```
+
+<a name="GetBucketNotification"></a>
+### GetBucketNotification(bucketName string) (BucketNotification, error)
+获取存储桶的通知配置
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+
+__返回值__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketNotification` | _minio.BucketNotification_ |含有所有通知配置的数据结构|
+|`err` | _error_ |标准Error |
+
+__示例__
+
+
+```go
+bucketNotification, err := minioClient.GetBucketNotification("mybucket")
+if err != nil {
+ fmt.Println("Failed to get bucket notification configurations for mybucket", err)
+ return
+}
+
+for _, queueConfig := range bucketNotification.QueueConfigs {
+ for _, e := range queueConfig.Events {
+ fmt.Println(e + " event is enabled")
+ }
+}
+```
+
+<a name="SetBucketNotification"></a>
+### SetBucketNotification(bucketName string, bucketNotification BucketNotification) error
+给存储桶设置新的通知
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+|`bucketNotification` | _minio.BucketNotification_ |发送给配置的web service的XML |
+
+__返回值__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`err` | _error_ |标准Error |
+
+__示例__
+
+
+```go
+queueArn := minio.NewArn("aws", "sqs", "us-east-1", "804605494417", "PhotoUpdate")
+
+queueConfig := minio.NewNotificationConfig(queueArn)
+queueConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll)
+queueConfig.AddFilterPrefix("photos/")
+queueConfig.AddFilterSuffix(".jpg")
+
+bucketNotification := minio.BucketNotification{}
+bucketNotification.AddQueue(queueConfig)
+
+err = minioClient.SetBucketNotification("mybucket", bucketNotification)
+if err != nil {
+ fmt.Println("Unable to set the bucket notification: ", err)
+ return
+}
+```
+
+<a name="RemoveAllBucketNotification"></a>
+### RemoveAllBucketNotification(bucketName string) error
+删除存储桶上所有配置的通知
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ |存储桶名称 |
+
+__返回值__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`err` | _error_ |标准Error |
+
+__示例__
+
+
+```go
+err = minioClient.RemoveAllBucketNotification("mybucket")
+if err != nil {
+ fmt.Println("Unable to remove bucket notifications.", err)
+ return
+}
+```
+
+<a name="ListenBucketNotification"></a>
+### ListenBucketNotification(bucketName, prefix, suffix string, events []string, doneCh <-chan struct{}) <-chan NotificationInfo
+ListenBucketNotification API通过notification channel接收存储桶通知事件。返回的notification channel有两个属性,'Records'和'Err'。
+
+- 'Records'持有从服务器返回的通知信息。
+- 'Err'表示的是处理接收到的通知时报的任何错误。
+
+注意:一旦报错,notification channel就会关闭。
+
+__参数__
+
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`bucketName` | _string_ | 被监听通知的存储桶 |
+|`prefix` | _string_ | 过滤通知的对象前缀 |
+|`suffix` | _string_ | 过滤通知的对象后缀 |
+|`events` | _[]string_ | 开启指定事件类型的通知 |
+|`doneCh` | _chan struct{}_ | 在该channel上结束ListenBucketNotification iterator的一个message。 |
+
+__返回值__
+
+|参数 |类型 |描述 |
+|:---|:---| :---|
+|`notificationInfo` | _chan minio.NotificationInfo_ | 存储桶通知的channel |
+
+__minio.NotificationInfo__
+
+|属性 |类型 |描述 |
+|:---|:---| :---|
+|`notificationInfo.Records` | _[]minio.NotificationEvent_ | 通知事件的集合 |
+|`notificationInfo.Err` | _error_ | 操作时报的任何错误(标准Error) |
+
+
+__示例__
+
+
+```go
+// Create a done channel to control 'ListenBucketNotification' go routine.
+doneCh := make(chan struct{})
+
+// Indicate a background go-routine to exit cleanly upon return.
+defer close(doneCh)
+
+// Listen for bucket notifications on "mybucket" filtered by prefix, suffix and events.
+for notificationInfo := range minioClient.ListenBucketNotification("mybucket", "myprefix/", ".mysuffix", []string{
+ "s3:ObjectCreated:*",
+ "s3:ObjectAccessed:*",
+ "s3:ObjectRemoved:*",
+ }, doneCh) {
+ if notificationInfo.Err != nil {
+ fmt.Println(notificationInfo.Err)
+ }
+ fmt.Println(notificationInfo)
+}
+```
+
+## 7. 客户端自定义设置
+
+<a name="SetAppInfo"></a>
+### SetAppInfo(appName, appVersion string)
+给User-Agent添加的自定义应用信息。
+
+__参数__
+
+| 参数 | 类型 | 描述 |
+|---|---|---|
+|`appName` | _string_ | 发请求的应用名称 |
+| `appVersion`| _string_ | 发请求的应用版本 |
+
+
+__示例__
+
+
+```go
+// Set Application name and version to be used in subsequent API requests.
+minioClient.SetAppInfo("myCloudApp", "1.0.0")
+```
+
+<a name="SetCustomTransport"></a>
+### SetCustomTransport(customHTTPTransport http.RoundTripper)
+重写默认的HTTP transport,通常用于调试或者添加自定义的TLS证书。
+
+__参数__
+
+| 参数 | 类型 | 描述 |
+|---|---|---|
+|`customHTTPTransport` | _http.RoundTripper_ | 自定义的transport,例如:为了调试对API请求响应进行追踪。|
+
+
+<a name="TraceOn"></a>
+### TraceOn(outputStream io.Writer)
+开启HTTP tracing。追踪信息输出到io.Writer,如果outputstream为nil,则trace写入到os.Stdout标准输出。
+
+__参数__
+
+| 参数 | 类型 | 描述 |
+|---|---|---|
+|`outputStream` | _io.Writer_ | HTTP trace写入到outputStream |
+
+
+<a name="TraceOff"></a>
+### TraceOff()
+关闭HTTP tracing。
+
+<a name="SetS3TransferAccelerate"></a>
+### SetS3TransferAccelerate(acceleratedEndpoint string)
+给后续所有API请求设置AWS S3传输加速endpoint。
+注意:此API仅对AWS S3有效,对其它S3兼容的对象存储服务不生效。
+
+__参数__
+
+| 参数 | 类型 | 描述 |
+|---|---|---|
+|`acceleratedEndpoint` | _string_ | 设置新的S3传输加速endpoint。|
+
+
+## 8. 了解更多
+
+- [用Go语言创建属于你的音乐播放器APP示例](https://docs.minio.io/docs/go-music-player-app)
diff --git a/docs/zh_CN/CONTRIBUTING.md b/docs/zh_CN/CONTRIBUTING.md
new file mode 100644
index 0000000..bc408c6
--- /dev/null
+++ b/docs/zh_CN/CONTRIBUTING.md
@@ -0,0 +1,22 @@
+
+### 开发者指南
+
+``minio-go``欢迎你的贡献。为了让大家配合更加默契,我们做出如下约定:
+
+* fork项目并修改,我们鼓励大家使用pull requests进行代码相关的讨论。
+ - Fork项目
+ - 创建你的特性分支 (git checkout -b my-new-feature)
+ - Commit你的修改(git commit -am 'Add some feature')
+ - Push到远程分支(git push origin my-new-feature)
+ - 创建一个Pull Request
+
+* 当你准备创建pull request时,请确保:
+ - 写单元测试,如果你有什么疑问,请在pull request中提出来。
+ - 运行`go fmt`
+ - 将你的多个提交合并成一个提交: `git rebase -i`。你可以强制update你的pull request。
+ - 确保`go test -race ./...`和`go build`完成。
+ 注意:go test会进行功能测试,这需要你有一个AWS S3账号。将账户信息设为``ACCESS_KEY``和``SECRET_KEY``环境变量。如果想运行简版测试,请使用``go test -short -race ./...``。
+
+* 请阅读 [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments)
+ - `minio-go`项目严格符合Golang风格
+ - 如果您看到代码有问题,请随时发一个pull request
diff --git a/examples/minio/listenbucketnotification.go b/examples/minio/listenbucketnotification.go
index 037e225..4c48510 100644
--- a/examples/minio/listenbucketnotification.go
+++ b/examples/minio/listenbucketnotification.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/examples/s3/bucketexists.go b/examples/s3/bucketexists.go
index 945510d..20dea30 100644
--- a/examples/s3/bucketexists.go
+++ b/examples/s3/bucketexists.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/examples/s3/composeobject.go b/examples/s3/composeobject.go
index 555d98b..2f76ff0 100644
--- a/examples/s3/composeobject.go
+++ b/examples/s3/composeobject.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -47,14 +48,14 @@ func main() {
// Source objects to concatenate. We also specify decryption
// key for each
- src1 := minio.NewSourceInfo("bucket1", "object1", decKey)
- src1.SetMatchETag("31624deb84149d2f8ef9c385918b653a")
+ src1 := minio.NewSourceInfo("bucket1", "object1", &decKey)
+ src1.SetMatchETagCond("31624deb84149d2f8ef9c385918b653a")
- src2 := minio.NewSourceInfo("bucket2", "object2", decKey)
- src2.SetMatchETag("f8ef9c385918b653a31624deb84149d2")
+ src2 := minio.NewSourceInfo("bucket2", "object2", &decKey)
+ src2.SetMatchETagCond("f8ef9c385918b653a31624deb84149d2")
- src3 := minio.NewSourceInfo("bucket3", "object3", decKey)
- src3.SetMatchETag("5918b653a31624deb84149d2f8ef9c38")
+ src3 := minio.NewSourceInfo("bucket3", "object3", &decKey)
+ src3.SetMatchETagCond("5918b653a31624deb84149d2f8ef9c38")
// Create slice of sources.
srcs := []minio.SourceInfo{src1, src2, src3}
@@ -63,11 +64,14 @@ func main() {
encKey := minio.NewSSEInfo([]byte{8, 9, 0}, "")
// Create destination info
- dst := minio.NewDestinationInfo("bucket", "object", encKey)
+ dst, err := minio.NewDestinationInfo("bucket", "object", &encKey, nil)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
err = s3Client.ComposeObject(dst, srcs)
if err != nil {
- log.Println(err)
- return
+ log.Fatalln(err)
}
log.Println("Composed object successfully.")
diff --git a/examples/s3/copyobject.go b/examples/s3/copyobject.go
index 0de8655..a7c3eca 100644
--- a/examples/s3/copyobject.go
+++ b/examples/s3/copyobject.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -60,7 +61,10 @@ func main() {
// src.SetMatchETagExceptCond("31624deb84149d2f8ef9c385918b653a")
// Destination object
- dst := minio.NewDestinationInfo("my-bucketname", "my-objectname", nil)
+ dst, err := minio.NewDestinationInfo("my-bucketname", "my-objectname", nil, nil)
+ if err != nil {
+ log.Fatalln(err)
+ }
// Initiate copy object.
err = s3Client.CopyObject(dst, src)
diff --git a/examples/s3/fgetobject-context.go b/examples/s3/fgetobject-context.go
new file mode 100644
index 0000000..6004baa
--- /dev/null
+++ b/examples/s3/fgetobject-context.go
@@ -0,0 +1,54 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+ "time"
+
+ "context"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname
+ // and my-filename.csv are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
+ defer cancel()
+
+ if err := s3Client.FGetObjectWithContext(ctx, "my-bucketname", "my-objectname", "my-filename.csv", minio.GetObjectOptions{}); err != nil {
+ log.Fatalln(err)
+ }
+ log.Println("Successfully saved my-filename.csv")
+
+}
diff --git a/examples/s3/fgetobject.go b/examples/s3/fgetobject.go
index bef756d..819a34f 100644
--- a/examples/s3/fgetobject.go
+++ b/examples/s3/fgetobject.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -38,7 +39,7 @@ func main() {
log.Fatalln(err)
}
- if err := s3Client.FGetObject("my-bucketname", "my-objectname", "my-filename.csv"); err != nil {
+ if err := s3Client.FGetObject("my-bucketname", "my-objectname", "my-filename.csv", minio.GetObjectOptions{}); err != nil {
log.Fatalln(err)
}
log.Println("Successfully saved my-filename.csv")
diff --git a/examples/s3/fputencrypted-object.go b/examples/s3/fputencrypted-object.go
new file mode 100644
index 0000000..96eec7e
--- /dev/null
+++ b/examples/s3/fputencrypted-object.go
@@ -0,0 +1,80 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+
+ "github.com/minio/minio-go"
+ "github.com/minio/minio-go/pkg/encrypt"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
+ // my-objectname are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // Specify a local file that we will upload
+ filePath := "my-testfile"
+
+ //// Build an asymmetric key from private and public files
+ //
+ // privateKey, err := ioutil.ReadFile("private.key")
+ // if err != nil {
+ // t.Fatal(err)
+ // }
+ //
+ // publicKey, err := ioutil.ReadFile("public.key")
+ // if err != nil {
+ // t.Fatal(err)
+ // }
+ //
+ // asymmetricKey, err := NewAsymmetricKey(privateKey, publicKey)
+ // if err != nil {
+ // t.Fatal(err)
+ // }
+ ////
+
+ // Build a symmetric key
+ symmetricKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00"))
+
+ // Build encryption materials which will encrypt uploaded data
+ cbcMaterials, err := encrypt.NewCBCSecureMaterials(symmetricKey)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // Encrypt file content and upload to the server
+ n, err := s3Client.FPutEncryptedObject("my-bucketname", "my-objectname", filePath, cbcMaterials)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ log.Println("Uploaded", "my-objectname", " of size: ", n, "Successfully.")
+}
diff --git a/examples/s3/fputobject-context.go b/examples/s3/fputobject-context.go
new file mode 100644
index 0000000..d7c941c
--- /dev/null
+++ b/examples/s3/fputobject-context.go
@@ -0,0 +1,53 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+ "time"
+
+ "context"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname
+ // and my-filename.csv are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
+ defer cancel()
+
+ if _, err := s3Client.FPutObjectWithContext(ctx, "my-bucketname", "my-objectname", "my-filename.csv", minio.PutObjectOptions{ContentType: "application/csv"}); err != nil {
+ log.Fatalln(err)
+ }
+ log.Println("Successfully uploaded my-filename.csv")
+}
diff --git a/examples/s3/fputobject.go b/examples/s3/fputobject.go
index f4e60ac..34d8768 100644
--- a/examples/s3/fputobject.go
+++ b/examples/s3/fputobject.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -38,7 +39,9 @@ func main() {
log.Fatalln(err)
}
- if _, err := s3Client.FPutObject("my-bucketname", "my-objectname", "my-filename.csv", "application/csv"); err != nil {
+ if _, err := s3Client.FPutObject("my-bucketname", "my-objectname", "my-filename.csv", minio.PutObjectOptions{
+ ContentType: "application/csv",
+ }); err != nil {
log.Fatalln(err)
}
log.Println("Successfully uploaded my-filename.csv")
diff --git a/examples/s3/get-encrypted-object.go b/examples/s3/get-encrypted-object.go
index 8f51f26..9783beb 100644
--- a/examples/s3/get-encrypted-object.go
+++ b/examples/s3/get-encrypted-object.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/examples/s3/getbucketnotification.go b/examples/s3/getbucketnotification.go
index 67f010e..19349ba 100644
--- a/examples/s3/getbucketnotification.go
+++ b/examples/s3/getbucketnotification.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/examples/s3/getbucketpolicy.go b/examples/s3/getbucketpolicy.go
index e5f9604..f9ac89b 100644
--- a/examples/s3/getbucketpolicy.go
+++ b/examples/s3/getbucketpolicy.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/examples/s3/getobject-context.go b/examples/s3/getobject-context.go
new file mode 100644
index 0000000..c7d4170
--- /dev/null
+++ b/examples/s3/getobject-context.go
@@ -0,0 +1,73 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "io"
+ "log"
+ "os"
+ "time"
+
+ "context"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname, my-objectname and
+ // my-testfile are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
+ defer cancel()
+
+ opts := minio.GetObjectOptions{}
+	opts.SetModified(time.Now().Round(10 * time.Minute)) // get object only if it was modified within the last 10 minutes
+ reader, err := s3Client.GetObjectWithContext(ctx, "my-bucketname", "my-objectname", opts)
+ if err != nil {
+ log.Fatalln(err)
+ }
+ defer reader.Close()
+
+ localFile, err := os.Create("my-testfile")
+ if err != nil {
+ log.Fatalln(err)
+ }
+ defer localFile.Close()
+
+ stat, err := reader.Stat()
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ if _, err := io.CopyN(localFile, reader, stat.Size); err != nil {
+ log.Fatalln(err)
+ }
+}
diff --git a/examples/s3/getobject.go b/examples/s3/getobject.go
index 96bb855..e17ef81 100644
--- a/examples/s3/getobject.go
+++ b/examples/s3/getobject.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -40,7 +41,7 @@ func main() {
log.Fatalln(err)
}
- reader, err := s3Client.GetObject("my-bucketname", "my-objectname")
+ reader, err := s3Client.GetObject("my-bucketname", "my-objectname", minio.GetObjectOptions{})
if err != nil {
log.Fatalln(err)
}
diff --git a/examples/s3/listbucketpolicies.go b/examples/s3/listbucketpolicies.go
index 19a2d1b..43edd0c 100644
--- a/examples/s3/listbucketpolicies.go
+++ b/examples/s3/listbucketpolicies.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/examples/s3/listbuckets.go b/examples/s3/listbuckets.go
index 81a99e6..5eae587 100644
--- a/examples/s3/listbuckets.go
+++ b/examples/s3/listbuckets.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/examples/s3/listincompleteuploads.go b/examples/s3/listincompleteuploads.go
index 34771e4..a5a79b6 100644
--- a/examples/s3/listincompleteuploads.go
+++ b/examples/s3/listincompleteuploads.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/examples/s3/listobjects-N.go b/examples/s3/listobjects-N.go
index 5dde367..55bceb4 100644
--- a/examples/s3/listobjects-N.go
+++ b/examples/s3/listobjects-N.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/examples/s3/listobjects.go b/examples/s3/listobjects.go
index 4fd5c06..1da2e3f 100644
--- a/examples/s3/listobjects.go
+++ b/examples/s3/listobjects.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/examples/s3/listobjectsV2.go b/examples/s3/listobjectsV2.go
index b52b4da..190aec3 100644
--- a/examples/s3/listobjectsV2.go
+++ b/examples/s3/listobjectsV2.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/examples/s3/makebucket.go b/examples/s3/makebucket.go
index ae222a8..419c96c 100644
--- a/examples/s3/makebucket.go
+++ b/examples/s3/makebucket.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/examples/s3/presignedgetobject.go b/examples/s3/presignedgetobject.go
index 11be0c0..fd7fb9e 100644
--- a/examples/s3/presignedgetobject.go
+++ b/examples/s3/presignedgetobject.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/examples/s3/presignedheadobject.go b/examples/s3/presignedheadobject.go
new file mode 100644
index 0000000..8dbc0a4
--- /dev/null
+++ b/examples/s3/presignedheadobject.go
@@ -0,0 +1,54 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+ "net/url"
+ "time"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
+ // are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // Set request parameters
+ reqParams := make(url.Values)
+ reqParams.Set("response-content-disposition", "attachment; filename=\"your-filename.txt\"")
+
+	// Generate presigned HEAD object URL.
+ presignedURL, err := s3Client.PresignedHeadObject("my-bucketname", "my-objectname", time.Duration(1000)*time.Second, reqParams)
+ if err != nil {
+ log.Fatalln(err)
+ }
+ log.Println(presignedURL)
+}
diff --git a/examples/s3/presignedpostpolicy.go b/examples/s3/presignedpostpolicy.go
index 3f37cef..205ac95 100644
--- a/examples/s3/presignedpostpolicy.go
+++ b/examples/s3/presignedpostpolicy.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/examples/s3/presignedputobject.go b/examples/s3/presignedputobject.go
index 3db6f6e..b2f8b4f 100644
--- a/examples/s3/presignedputobject.go
+++ b/examples/s3/presignedputobject.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/examples/s3/put-encrypted-object.go b/examples/s3/put-encrypted-object.go
index b8f7e12..cdf09ac 100644
--- a/examples/s3/put-encrypted-object.go
+++ b/examples/s3/put-encrypted-object.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -75,7 +76,7 @@ func main() {
}
// Encrypt file content and upload to the server
- n, err := s3Client.PutEncryptedObject("my-bucketname", "my-objectname", file, cbcMaterials, nil, nil)
+ n, err := s3Client.PutEncryptedObject("my-bucketname", "my-objectname", file, cbcMaterials)
if err != nil {
log.Fatalln(err)
}
diff --git a/examples/s3/putobject-context.go b/examples/s3/putobject-context.go
new file mode 100644
index 0000000..acc923f
--- /dev/null
+++ b/examples/s3/putobject-context.go
@@ -0,0 +1,68 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+ "os"
+ "time"
+
+ "context"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
+ // my-objectname are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
+ defer cancel()
+
+ object, err := os.Open("my-testfile")
+ if err != nil {
+ log.Fatalln(err)
+ }
+ defer object.Close()
+
+ objectStat, err := object.Stat()
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ n, err := s3Client.PutObjectWithContext(ctx, "my-bucketname", "my-objectname", object, objectStat.Size(), minio.PutObjectOptions{
+ ContentType: "application/octet-stream",
+ })
+ if err != nil {
+ log.Fatalln(err)
+ }
+ log.Println("Uploaded", "my-objectname", " of size: ", n, "Successfully.")
+}
diff --git a/examples/s3/putobject-getobject-sse.go b/examples/s3/putobject-getobject-sse.go
index 92e6a48..3d3b2fd 100644
--- a/examples/s3/putobject-getobject-sse.go
+++ b/examples/s3/putobject-getobject-sse.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -24,7 +25,6 @@ import (
"encoding/base64"
"io/ioutil"
"log"
- "net/http"
minio "github.com/minio/minio-go"
)
@@ -54,24 +54,24 @@ func main() {
// of the encryption key or to decrypt the contents of the
// encrypted object. That means, if you lose the encryption
// key, you lose the object.
- var metadata = map[string][]string{
- "x-amz-server-side-encryption-customer-algorithm": []string{"AES256"},
- "x-amz-server-side-encryption-customer-key": []string{encryptionKey},
- "x-amz-server-side-encryption-customer-key-MD5": []string{encryptionKeyMD5},
+ var metadata = map[string]string{
+ "x-amz-server-side-encryption-customer-algorithm": "AES256",
+ "x-amz-server-side-encryption-customer-key": encryptionKey,
+ "x-amz-server-side-encryption-customer-key-MD5": encryptionKeyMD5,
}
// minioClient.TraceOn(os.Stderr) // Enable to debug.
- _, err = minioClient.PutObjectWithMetadata("mybucket", "my-encrypted-object.txt", content, metadata, nil)
+ _, err = minioClient.PutObject("mybucket", "my-encrypted-object.txt", content, 11, minio.PutObjectOptions{UserMetadata: metadata})
if err != nil {
log.Fatalln(err)
}
- var reqHeaders = minio.RequestHeaders{Header: http.Header{}}
+ opts := minio.GetObjectOptions{}
for k, v := range metadata {
- reqHeaders.Set(k, v[0])
+ opts.Set(k, v)
}
coreClient := minio.Core{minioClient}
- reader, _, err := coreClient.GetObject("mybucket", "my-encrypted-object.txt", reqHeaders)
+ reader, _, err := coreClient.GetObject("mybucket", "my-encrypted-object.txt", opts)
if err != nil {
log.Fatalln(err)
}
diff --git a/examples/s3/putobject-progress.go b/examples/s3/putobject-progress.go
index 1179fd7..0e92dd6 100644
--- a/examples/s3/putobject-progress.go
+++ b/examples/s3/putobject-progress.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -39,7 +40,7 @@ func main() {
log.Fatalln(err)
}
- reader, err := s3Client.GetObject("my-bucketname", "my-objectname")
+ reader, err := s3Client.GetObject("my-bucketname", "my-objectname", minio.GetObjectOptions{})
if err != nil {
log.Fatalln(err)
}
@@ -54,8 +55,8 @@ func main() {
// the Reads inside.
progress := pb.New64(objectInfo.Size)
progress.Start()
+ n, err := s3Client.PutObject("my-bucketname", "my-objectname-progress", reader, objectInfo.Size, minio.PutObjectOptions{ContentType: "application/octet-stream", Progress: progress})
- n, err := s3Client.PutObjectWithProgress("my-bucketname", "my-objectname-progress", reader, "application/octet-stream", progress)
if err != nil {
log.Fatalln(err)
}
diff --git a/examples/s3/putobject-s3-accelerate.go b/examples/s3/putobject-s3-accelerate.go
index e47976f..06345cd 100644
--- a/examples/s3/putobject-s3-accelerate.go
+++ b/examples/s3/putobject-s3-accelerate.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -40,7 +41,7 @@ func main() {
}
// Enable S3 transfer accelerate endpoint.
- s3Client.S3TransferAccelerate("s3-accelerate.amazonaws.com")
+ s3Client.SetS3TransferAccelerate("s3-accelerate.amazonaws.com")
object, err := os.Open("my-testfile")
if err != nil {
@@ -48,7 +49,12 @@ func main() {
}
defer object.Close()
- n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, "application/octet-stream")
+ objectStat, err := object.Stat()
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, objectStat.Size(), minio.PutObjectOptions{ContentType: "application/octet-stream"})
if err != nil {
log.Fatalln(err)
}
diff --git a/examples/s3/putobject-streaming.go b/examples/s3/putobject-streaming.go
index d10407d..85b78dd 100644
--- a/examples/s3/putobject-streaming.go
+++ b/examples/s3/putobject-streaming.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -45,7 +46,7 @@ func main() {
}
defer object.Close()
- n, err := s3Client.PutObjectStreaming("my-bucketname", "my-objectname", object)
+ n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, -1, minio.PutObjectOptions{})
if err != nil {
log.Fatalln(err)
}
diff --git a/examples/s3/putobject.go b/examples/s3/putobject.go
index caa7313..b9e4ff1 100644
--- a/examples/s3/putobject.go
+++ b/examples/s3/putobject.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -44,8 +45,12 @@ func main() {
log.Fatalln(err)
}
defer object.Close()
+ objectStat, err := object.Stat()
+ if err != nil {
+ log.Fatalln(err)
+ }
- n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, "application/octet-stream")
+ n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, objectStat.Size(), minio.PutObjectOptions{ContentType: "application/octet-stream"})
if err != nil {
log.Fatalln(err)
}
diff --git a/examples/s3/removeallbucketnotification.go b/examples/s3/removeallbucketnotification.go
index 0f5f3a7..1186afa 100644
--- a/examples/s3/removeallbucketnotification.go
+++ b/examples/s3/removeallbucketnotification.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/examples/s3/removebucket.go b/examples/s3/removebucket.go
index fb013ca..7a7737e 100644
--- a/examples/s3/removebucket.go
+++ b/examples/s3/removebucket.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/examples/s3/removeincompleteupload.go b/examples/s3/removeincompleteupload.go
index d486182..31cc879 100644
--- a/examples/s3/removeincompleteupload.go
+++ b/examples/s3/removeincompleteupload.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/examples/s3/removeobject.go b/examples/s3/removeobject.go
index 13b00b4..7e58485 100644
--- a/examples/s3/removeobject.go
+++ b/examples/s3/removeobject.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/examples/s3/removeobjects.go b/examples/s3/removeobjects.go
index 5946069..b912bc8 100644
--- a/examples/s3/removeobjects.go
+++ b/examples/s3/removeobjects.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -20,7 +21,6 @@ package main
import (
"log"
- "strconv"
"github.com/minio/minio-go"
)
@@ -44,8 +44,12 @@ func main() {
// Send object names that are needed to be removed to objectsCh
go func() {
defer close(objectsCh)
- for i := 0; i < 10; i++ {
- objectsCh <- "/path/to/my-objectname" + strconv.Itoa(i)
+ // List all objects from a bucket-name with a matching prefix.
+ for object := range s3Client.ListObjects("my-bucketname", "my-prefixname", true, doneCh) {
+ if object.Err != nil {
+ log.Fatalln(object.Err)
+ }
+ objectsCh <- object.Key
}
}()
diff --git a/examples/s3/setbucketnotification.go b/examples/s3/setbucketnotification.go
index 5fe1e31..b5af30f 100644
--- a/examples/s3/setbucketnotification.go
+++ b/examples/s3/setbucketnotification.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/examples/s3/setbucketpolicy.go b/examples/s3/setbucketpolicy.go
index 40906ee..c81fb50 100644
--- a/examples/s3/setbucketpolicy.go
+++ b/examples/s3/setbucketpolicy.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/examples/s3/statobject.go b/examples/s3/statobject.go
index 4c5453a..0b27a83 100644
--- a/examples/s3/statobject.go
+++ b/examples/s3/statobject.go
@@ -1,7 +1,8 @@
// +build ignore
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -37,7 +38,7 @@ func main() {
if err != nil {
log.Fatalln(err)
}
- stat, err := s3Client.StatObject("my-bucketname", "my-objectname")
+ stat, err := s3Client.StatObject("my-bucketname", "my-objectname", minio.StatObjectOptions{})
if err != nil {
log.Fatalln(err)
}
diff --git a/functional_tests.go b/functional_tests.go
new file mode 100644
index 0000000..2e0d1e7
--- /dev/null
+++ b/functional_tests.go
@@ -0,0 +1,6721 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "bytes"
+ "context"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math/rand"
+ "mime/multipart"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+
+ humanize "github.com/dustin/go-humanize"
+ minio "github.com/minio/minio-go"
+ log "github.com/sirupsen/logrus"
+
+ "github.com/minio/minio-go/pkg/encrypt"
+ "github.com/minio/minio-go/pkg/policy"
+)
+
// letterBytes is the alphabet used when generating random bucket/object
// names: lowercase ASCII letters plus digits.
// NOTE(review): the digit run "01234569" skips 7 and 8 — possibly a typo,
// but changing it would alter generated names, so it is left as-is.
const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"
const (
	letterIdxBits = 6                    // 6 bits to represent a letter index
	letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
	letterIdxMax  = 63 / letterIdxBits   // # of letter indices fitting in 63 bits
)

// Names of the environment variables carrying the test configuration.
const (
	serverEndpoint = "SERVER_ENDPOINT"
	accessKey      = "ACCESS_KEY"
	secretKey      = "SECRET_KEY"
	enableHTTPS    = "ENABLE_HTTPS"
)
+
+type mintJSONFormatter struct {
+}
+
+func (f *mintJSONFormatter) Format(entry *log.Entry) ([]byte, error) {
+ data := make(log.Fields, len(entry.Data))
+ for k, v := range entry.Data {
+ switch v := v.(type) {
+ case error:
+ // Otherwise errors are ignored by `encoding/json`
+ // https://github.com/sirupsen/logrus/issues/137
+ data[k] = v.Error()
+ default:
+ data[k] = v
+ }
+ }
+
+ serialized, err := json.Marshal(data)
+ if err != nil {
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ }
+ return append(serialized, '\n'), nil
+}
+
+func cleanEmptyEntries(fields log.Fields) log.Fields {
+ cleanFields := log.Fields{}
+ for k, v := range fields {
+ if v != "" {
+ cleanFields[k] = v
+ }
+ }
+ return cleanFields
+}
+
+// log successful test runs
+func successLogger(testName string, function string, args map[string]interface{}, startTime time.Time) *log.Entry {
+ // calculate the test case duration
+ duration := time.Since(startTime)
+ // log with the fields as per mint
+ fields := log.Fields{"name": "minio-go: " + testName, "function": function, "args": args, "duration": duration.Nanoseconds() / 1000000, "status": "PASS"}
+ return log.WithFields(cleanEmptyEntries(fields))
+}
+
+// As few of the features are not available in Gateway(s) currently, Check if err value is NotImplemented,
+// and log as NA in that case and continue execution. Otherwise log as failure and return
+func logError(testName string, function string, args map[string]interface{}, startTime time.Time, alert string, message string, err error) {
+ // If server returns NotImplemented we assume it is gateway mode and hence log it as info and move on to next tests
+ // Special case for ComposeObject API as it is implemented on client side and adds specific error details like `Error in upload-part-copy` in
+ // addition to NotImplemented error returned from server
+ if isErrNotImplemented(err) {
+ ignoredLog(testName, function, args, startTime, message).Info()
+ } else {
+ failureLog(testName, function, args, startTime, alert, message, err).Fatal()
+ }
+}
+
+// log failed test runs
+func failureLog(testName string, function string, args map[string]interface{}, startTime time.Time, alert string, message string, err error) *log.Entry {
+ // calculate the test case duration
+ duration := time.Since(startTime)
+ var fields log.Fields
+ // log with the fields as per mint
+ if err != nil {
+ fields = log.Fields{"name": "minio-go: " + testName, "function": function, "args": args,
+ "duration": duration.Nanoseconds() / 1000000, "status": "FAIL", "alert": alert, "message": message, "error": err}
+ } else {
+ fields = log.Fields{"name": "minio-go: " + testName, "function": function, "args": args,
+ "duration": duration.Nanoseconds() / 1000000, "status": "FAIL", "alert": alert, "message": message}
+ }
+ return log.WithFields(cleanEmptyEntries(fields))
+}
+
+// log not applicable test runs
+func ignoredLog(testName string, function string, args map[string]interface{}, startTime time.Time, alert string) *log.Entry {
+ // calculate the test case duration
+ duration := time.Since(startTime)
+ // log with the fields as per mint
+ fields := log.Fields{"name": "minio-go: " + testName, "function": function, "args": args,
+ "duration": duration.Nanoseconds() / 1000000, "status": "NA", "alert": alert}
+ return log.WithFields(cleanEmptyEntries(fields))
+}
+
+// Delete objects in given bucket, recursively
+func cleanupBucket(bucketName string, c *minio.Client) error {
+ // Create a done channel to control 'ListObjectsV2' go routine.
+ doneCh := make(chan struct{})
+ // Exit cleanly upon return.
+ defer close(doneCh)
+ // Iterate over all objects in the bucket via listObjectsV2 and delete
+ for objCh := range c.ListObjectsV2(bucketName, "", true, doneCh) {
+ if objCh.Err != nil {
+ return objCh.Err
+ }
+ if objCh.Key != "" {
+ err := c.RemoveObject(bucketName, objCh.Key)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ for objPartInfo := range c.ListIncompleteUploads(bucketName, "", true, doneCh) {
+ if objPartInfo.Err != nil {
+ return objPartInfo.Err
+ }
+ if objPartInfo.Key != "" {
+ err := c.RemoveIncompleteUpload(bucketName, objPartInfo.Key)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ // objects are already deleted, clear the buckets now
+ err := c.RemoveBucket(bucketName)
+ if err != nil {
+ return err
+ }
+ return err
+}
+
+func isErrNotImplemented(err error) bool {
+ return minio.ToErrorResponse(err).Code == "NotImplemented"
+}
+
// init seeds the test configuration from the environment. When
// SERVER_ENDPOINT is unset, every test falls back to the public
// play.minio.io instance using its well-known demo credentials.
// NOTE(review): os.Setenv error returns are ignored here; acceptable for
// test bootstrap, but worth confirming nothing depends on them.
func init() {
	// If server endpoint is not set, all tests default to
	// using https://play.minio.io:9000
	if os.Getenv(serverEndpoint) == "" {
		os.Setenv(serverEndpoint, "play.minio.io:9000")
		os.Setenv(accessKey, "Q3AM3UQ867SPQQA43P2F")
		os.Setenv(secretKey, "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG")
		os.Setenv(enableHTTPS, "1")
	}
}
+
// mintDataDir holds the directory of pre-generated mint data files; empty
// when MINT_DATA_DIR is unset, in which case test data is synthesized.
var mintDataDir = os.Getenv("MINT_DATA_DIR")

// getMintDataDirFilePath returns the full path of filename inside the mint
// data directory, or "" when no data directory is configured.
func getMintDataDirFilePath(filename string) (fp string) {
	if mintDataDir != "" {
		fp = filepath.Join(mintDataDir, filename)
	}
	return fp
}
+
// sizedReader wraps an io.Reader with a fixed, known size and a no-op
// Close, giving tests a ReadCloser whose Size() is known up front.
type sizedReader struct {
	io.Reader
	size int
}

// Size reports the number of bytes the reader will yield.
func (l *sizedReader) Size() int { return l.size }

// Close is a no-op; the wrapped reader owns no resources.
func (l *sizedReader) Close() error { return nil }
+
// randomReader produces an endless stream that repeats its seed bytes.
type randomReader struct{ seed []byte }

// Read fills b by tiling the seed pattern across it and reports how many
// bytes were written (len(b) for a non-empty seed, 0 otherwise).
func (r *randomReader) Read(b []byte) (int, error) {
	pattern := bytes.Repeat(r.seed, len(b))
	filled := copy(b, pattern)
	return filled, nil
}
+
// read data from file if it exists or optionally create a buffer of particular size
//
// getDataReader returns a ReadCloser for the named mint data file. Without
// MINT_DATA_DIR it synthesizes a reader of the size listed in dataFileMap,
// repeating the byte 'a'.
// NOTE(review): the os.Open error is discarded; on failure the returned
// interface wraps a nil *os.File and a later Read would panic — confirm
// mint guarantees the file exists when MINT_DATA_DIR is set.
func getDataReader(fileName string) io.ReadCloser {
	if mintDataDir == "" {
		size := dataFileMap[fileName]
		return &sizedReader{
			Reader: io.LimitReader(&randomReader{
				seed: []byte("a"),
			}, int64(size)),
			size: size,
		}
	}
	reader, _ := os.Open(getMintDataDirFilePath(fileName))
	return reader
}
+
// randString generates random names and prepends them with a known prefix.
//
// The returned string is always exactly 30 characters long: the prefix plus
// 30-len(prefix) random characters drawn from letterBytes. n only sizes the
// scratch buffer; it must be at least 30-len(prefix) or the final slice
// expression panics (callers pass n=60).
func randString(n int, src rand.Source, prefix string) string {
	b := make([]byte, n)
	// A rand.Int63() generates 63 random bits, enough for letterIdxMax letters!
	for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
		if remain == 0 {
			cache, remain = src.Int63(), letterIdxMax
		}
		// A 6-bit index can reach 63; values >= len(letterBytes) are
		// rejected and the next 6 bits are tried instead.
		if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
			b[i] = letterBytes[idx]
			i--
		}
		cache >>= letterIdxBits
		remain--
	}
	return prefix + string(b[0:30-len(prefix)])
}
+
// dataFileMap maps mint data file names to their sizes in bytes. It is used
// to synthesize readers of matching size when MINT_DATA_DIR is not set.
var dataFileMap = map[string]int{
	"datafile-1-b":     1,
	"datafile-10-kB":   10 * humanize.KiByte,
	"datafile-33-kB":   33 * humanize.KiByte,
	"datafile-100-kB":  100 * humanize.KiByte,
	"datafile-1.03-MB": 1056 * humanize.KiByte,
	"datafile-1-MB":    1 * humanize.MiByte,
	"datafile-5-MB":    5 * humanize.MiByte,
	"datafile-6-MB":    6 * humanize.MiByte,
	"datafile-11-MB":   11 * humanize.MiByte,
	"datafile-65-MB":   65 * humanize.MiByte,
}
+
+func isQuickMode() bool {
+ return os.Getenv("MODE") == "quick"
+}
+
+func getFuncName() string {
+ pc, _, _, _ := runtime.Caller(1)
+ return strings.TrimPrefix(runtime.FuncForPC(pc).Name(), "main.")
+}
+
// Tests bucket re-create errors.
//
// testMakeBucketError creates a bucket in eu-central-1, attempts to create
// it again, and verifies the server rejects the duplicate with either
// BucketAlreadyExists or BucketAlreadyOwnedByYou. Skipped unless the
// endpoint is AWS S3, since region-specific behavior is exercised.
func testMakeBucketError() {
	region := "eu-central-1"

	// initialize logging params
	startTime := time.Now()
	testName := getFuncName()
	function := "MakeBucket(bucketName, region)"
	// initialize logging params
	args := map[string]interface{}{
		"bucketName": "",
		"region":     region,
	}

	// skipping region functional tests for non s3 runs
	if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
		ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non s3 runs").Info()
		return
	}

	// Seed random based on current time.
	rand.Seed(time.Now().Unix())

	// Instantiate new minio client object.
	c, err := minio.New(
		os.Getenv(serverEndpoint),
		os.Getenv(accessKey),
		os.Getenv(secretKey),
		mustParseBool(os.Getenv(enableHTTPS)),
	)
	if err != nil {
		logError(testName, function, args, startTime, "", "Minio client creation failed", err)
		return
	}

	// Enable tracing, write to stderr.
	// c.TraceOn(os.Stderr)

	// Set user agent.
	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")

	// Generate a new random bucket name.
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
	args["bucketName"] = bucketName

	// Make a new bucket in 'eu-central-1'.
	if err = c.MakeBucket(bucketName, region); err != nil {
		logError(testName, function, args, startTime, "", "MakeBucket Failed", err)
		return
	}
	// Re-creating the same bucket must fail; err == nil here is the failure case.
	if err = c.MakeBucket(bucketName, region); err == nil {
		logError(testName, function, args, startTime, "", "Bucket already exists", err)
		return
	}
	// Verify valid error response from server.
	if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" &&
		minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" {
		logError(testName, function, args, startTime, "", "Invalid error returned by server", err)
		return
	}
	// Delete all objects and buckets
	if err = cleanupBucket(bucketName, c); err != nil {
		logError(testName, function, args, startTime, "", "Cleanup failed", err)
		return
	}
	successLogger(testName, function, args, startTime).Info()
}
+
// testMetadataSizeLimit verifies that PutObject rejects requests whose
// user-defined metadata exceeds AWS's 2 KB user-metadata limit, and whose
// headers exceed the 8 KB total header limit. Both PutObject calls below
// are therefore expected to fail.
func testMetadataSizeLimit() {
	startTime := time.Now()
	testName := getFuncName()
	function := "PutObject(bucketName, objectName, reader, objectSize, opts)"
	args := map[string]interface{}{
		"bucketName":        "",
		"objectName":        "",
		"opts.UserMetadata": "",
	}
	rand.Seed(startTime.Unix())

	// Instantiate new minio client object.
	c, err := minio.New(
		os.Getenv(serverEndpoint),
		os.Getenv(accessKey),
		os.Getenv(secretKey),
		mustParseBool(os.Getenv(enableHTTPS)),
	)
	if err != nil {
		logError(testName, function, args, startTime, "", "Minio client creation failed", err)
		return
	}
	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")

	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
	args["bucketName"] = bucketName

	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
	args["objectName"] = objectName

	err = c.MakeBucket(bucketName, "us-east-1")
	if err != nil {
		logError(testName, function, args, startTime, "", "Make bucket failed", err)
		return
	}

	// AWS limits: 8 KB for all headers, 2 KB for user metadata.
	const HeaderSizeLimit = 8 * 1024
	const UserMetadataLimit = 2 * 1024

	// Meta-data greater than the 2 KB limit of AWS - PUT calls with this meta-data should fail
	metadata := make(map[string]string)
	metadata["X-Amz-Meta-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+UserMetadataLimit-len("X-Amz-Meta-Mint-Test")))
	args["metadata"] = fmt.Sprint(metadata)

	_, err = c.PutObject(bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata})
	if err == nil {
		logError(testName, function, args, startTime, "", "Created object with user-defined metadata exceeding metadata size limits", nil)
		return
	}

	// Meta-data (headers) greater than the 8 KB limit of AWS - PUT calls with this meta-data should fail
	metadata = make(map[string]string)
	metadata["X-Amz-Mint-Test"] = string(bytes.Repeat([]byte("m"), 1+HeaderSizeLimit-len("X-Amz-Mint-Test")))
	args["metadata"] = fmt.Sprint(metadata)
	_, err = c.PutObject(bucketName, objectName, bytes.NewReader(nil), 0, minio.PutObjectOptions{UserMetadata: metadata})
	if err == nil {
		logError(testName, function, args, startTime, "", "Created object with headers exceeding header size limits", nil)
		return
	}

	// Delete all objects and buckets
	if err = cleanupBucket(bucketName, c); err != nil {
		logError(testName, function, args, startTime, "", "Cleanup failed", err)
		return
	}

	successLogger(testName, function, args, startTime).Info()
}
+
// Tests various bucket supported formats.
//
// testMakeBucketRegions creates a bucket in eu-central-1, then one whose
// name contains a '.' in us-west-2 (which the client stages as path-style
// instead of virtual-host-style). Skipped unless the endpoint is AWS S3.
func testMakeBucketRegions() {
	region := "eu-central-1"
	// initialize logging params
	startTime := time.Now()
	testName := getFuncName()
	function := "MakeBucket(bucketName, region)"
	// initialize logging params
	args := map[string]interface{}{
		"bucketName": "",
		"region":     region,
	}

	// skipping region functional tests for non s3 runs
	if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
		ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non s3 runs").Info()
		return
	}

	// Seed random based on current time.
	rand.Seed(time.Now().Unix())

	// Instantiate new minio client object.
	c, err := minio.New(
		os.Getenv(serverEndpoint),
		os.Getenv(accessKey),
		os.Getenv(secretKey),
		mustParseBool(os.Getenv(enableHTTPS)),
	)
	if err != nil {
		logError(testName, function, args, startTime, "", "Minio client creation failed", err)
		return
	}

	// Enable tracing, write to stderr.
	// c.TraceOn(os.Stderr)

	// Set user agent.
	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")

	// Generate a new random bucket name.
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
	args["bucketName"] = bucketName

	// Make a new bucket in 'eu-central-1'.
	if err = c.MakeBucket(bucketName, region); err != nil {
		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
		return
	}

	// Delete all objects and buckets
	if err = cleanupBucket(bucketName, c); err != nil {
		logError(testName, function, args, startTime, "", "Cleanup failed", err)
		return
	}

	// Make a new bucket with '.' in its name, in 'us-west-2'. This
	// request is internally staged into a path style instead of
	// virtual host style.
	region = "us-west-2"
	args["region"] = region
	if err = c.MakeBucket(bucketName+".withperiod", region); err != nil {
		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
		return
	}

	// Delete all objects and buckets
	if err = cleanupBucket(bucketName+".withperiod", c); err != nil {
		logError(testName, function, args, startTime, "", "Cleanup failed", err)
		return
	}
	successLogger(testName, function, args, startTime).Info()
}
+
+// Test PutObject using a large data to trigger multipart readat
+func testPutObjectReadAt() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutObject(bucketName, objectName, reader, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "opts": "objectContentType",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Make bucket failed", err)
+ return
+ }
+
+ bufSize := dataFileMap["datafile-65-MB"]
+ var reader = getDataReader("datafile-65-MB")
+ defer reader.Close()
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ // Object content type
+ objectContentType := "binary/octet-stream"
+ args["objectContentType"] = objectContentType
+
+ n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: objectContentType})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ if n != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match, expected "+string(bufSize)+" got "+string(n), err)
+ return
+ }
+
+ // Read the data back
+ r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Get Object failed", err)
+ return
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat Object failed", err)
+ return
+ }
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err)
+ return
+ }
+ if st.ContentType != objectContentType {
+ logError(testName, function, args, startTime, "", "Content types don't match", err)
+ return
+ }
+ if err := r.Close(); err != nil {
+ logError(testName, function, args, startTime, "", "Object Close failed", err)
+ return
+ }
+ if err := r.Close(); err == nil {
+ logError(testName, function, args, startTime, "", "Object is already closed, didn't return error on Close", err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
+// Test PutObject using a large data to trigger multipart readat
+func testPutObjectWithMetadata() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutObject(bucketName, objectName, reader,size, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}",
+ }
+
+ if isQuickMode() {
+ ignoredLog(testName, function, args, startTime, "Skipping functional tests for short runs").Info()
+ return
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Make bucket failed", err)
+ return
+ }
+
+ bufSize := dataFileMap["datafile-65-MB"]
+ var reader = getDataReader("datafile-65-MB")
+ defer reader.Close()
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ // Object custom metadata
+ customContentType := "custom/contenttype"
+
+ args["metadata"] = map[string][]string{
+ "Content-Type": {customContentType},
+ }
+
+ n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{
+ ContentType: customContentType})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ if n != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match, expected "+string(bufSize)+" got "+string(n), err)
+ return
+ }
+
+ // Read the data back
+ r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err)
+ return
+ }
+ if st.ContentType != customContentType {
+ logError(testName, function, args, startTime, "", "ContentType does not match, expected "+customContentType+" got "+st.ContentType, err)
+ return
+ }
+ if err := r.Close(); err != nil {
+ logError(testName, function, args, startTime, "", "Object Close failed", err)
+ return
+ }
+ if err := r.Close(); err == nil {
+ logError(testName, function, args, startTime, "", "Object already closed, should respond with error", err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
// Test put object with streaming signature.
//
// testPutObjectStreaming uploads objects of sizes around the 64 KB
// boundary (0, 64K-1, 64K) with a V4 client and checks the byte counts
// PutObject reports.
func testPutObjectStreaming() {
	// initialize logging params
	objectName := "test-object"
	startTime := time.Now()
	testName := getFuncName()
	function := "PutObject(bucketName, objectName, reader,size,opts)"
	args := map[string]interface{}{
		"bucketName": "",
		"objectName": objectName,
		"size":       -1,
		"opts":       "",
	}

	// Seed random based on current time.
	rand.Seed(time.Now().Unix())

	// Instantiate new minio client object, explicitly signature V4
	// (streaming signatures are a V4 feature).
	c, err := minio.NewV4(
		os.Getenv(serverEndpoint),
		os.Getenv(accessKey),
		os.Getenv(secretKey),
		mustParseBool(os.Getenv(enableHTTPS)),
	)
	if err != nil {
		logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
		return
	}

	// Enable tracing, write to stderr.
	// c.TraceOn(os.Stderr)

	// Set user agent.
	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")

	// Generate a new random bucket name.
	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
	args["bucketName"] = bucketName
	// Make a new bucket.
	err = c.MakeBucket(bucketName, "us-east-1")
	if err != nil {
		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
		return
	}

	// Upload an object. The same object name is reused, overwriting
	// the previous upload on each iteration.
	sizes := []int64{0, 64*1024 - 1, 64 * 1024}

	for _, size := range sizes {
		data := bytes.Repeat([]byte("a"), int(size))
		n, err := c.PutObject(bucketName, objectName, bytes.NewReader(data), int64(size), minio.PutObjectOptions{})
		if err != nil {
			logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err)
			return
		}

		if n != size {
			logError(testName, function, args, startTime, "", "Expected upload object size doesn't match with PutObjectStreaming return value", err)
			return
		}
	}

	// Delete all objects and buckets
	if err = cleanupBucket(bucketName, c); err != nil {
		logError(testName, function, args, startTime, "", "Cleanup failed", err)
		return
	}

	successLogger(testName, function, args, startTime).Info()
}
+
+// Test listing partially uploaded objects.
+func testListPartiallyUploaded() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "isRecursive": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
+ }
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Enable tracing, write to stdout.
+ // c.TraceOn(os.Stderr)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ bufSize := dataFileMap["datafile-65-MB"]
+ r := bytes.NewReader(bytes.Repeat([]byte("0"), bufSize*2))
+
+ reader, writer := io.Pipe()
+ go func() {
+ i := 0
+ for i < 25 {
+ _, cerr := io.CopyN(writer, r, (int64(bufSize)*2)/25)
+ if cerr != nil {
+ logError(testName, function, args, startTime, "", "Copy failed", err)
+ return
+ }
+ i++
+ r.Seek(0, 0)
+ }
+ writer.CloseWithError(errors.New("proactively closed to be verified later"))
+ }()
+
+ objectName := bucketName + "-resumable"
+ args["objectName"] = objectName
+
+ _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize*2), minio.PutObjectOptions{ContentType: "application/octet-stream"})
+ if err == nil {
+ logError(testName, function, args, startTime, "", "PutObject should fail", err)
+ return
+ }
+ if !strings.Contains(err.Error(), "proactively closed to be verified later") {
+ logError(testName, function, args, startTime, "", "String not found in PutObject output", err)
+ return
+ }
+
+ doneCh := make(chan struct{})
+ defer close(doneCh)
+ isRecursive := true
+ args["isRecursive"] = isRecursive
+
+ multiPartObjectCh := c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)
+ for multiPartObject := range multiPartObjectCh {
+ if multiPartObject.Err != nil {
+ logError(testName, function, args, startTime, "", "Multipart object error", multiPartObject.Err)
+ return
+ }
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
+// Test get object seeker from the end, using whence set to '2'.
+func testGetObjectSeekEnd() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ // Generate 33K of data.
+ bufSize := dataFileMap["datafile-33-kB"]
+ var reader = getDataReader("datafile-33-kB")
+ defer reader.Close()
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ buf, err := ioutil.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ if n != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes read does not match, expected "+string(int64(bufSize))+" got "+string(n), err)
+ return
+ }
+
+ // Read the data back
+ r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes read does not match, expected "+string(int64(bufSize))+" got "+string(st.Size), err)
+ return
+ }
+
+ pos, err := r.Seek(-100, 2)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Object Seek failed", err)
+ return
+ }
+ if pos != st.Size-100 {
+ logError(testName, function, args, startTime, "", "Incorrect position", err)
+ return
+ }
+ buf2 := make([]byte, 100)
+ m, err := io.ReadFull(r, buf2)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Error reading through io.ReadFull", err)
+ return
+ }
+ if m != len(buf2) {
+ logError(testName, function, args, startTime, "", "Number of bytes dont match, expected "+string(len(buf2))+" got "+string(m), err)
+ return
+ }
+ hexBuf1 := fmt.Sprintf("%02x", buf[len(buf)-100:])
+ hexBuf2 := fmt.Sprintf("%02x", buf2[:m])
+ if hexBuf1 != hexBuf2 {
+ logError(testName, function, args, startTime, "", "Values at same index dont match", err)
+ return
+ }
+ pos, err = r.Seek(-100, 2)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Object Seek failed", err)
+ return
+ }
+ if pos != st.Size-100 {
+ logError(testName, function, args, startTime, "", "Incorrect position", err)
+ return
+ }
+ if err = r.Close(); err != nil {
+ logError(testName, function, args, startTime, "", "ObjectClose failed", err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
+// Test get object reader to not throw error on being closed twice.
+func testGetObjectClosedTwice() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ // Generate 33K of data.
+ bufSize := dataFileMap["datafile-33-kB"]
+ var reader = getDataReader("datafile-33-kB")
+ defer reader.Close()
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ if n != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "PutObject response doesn't match sent bytes, expected "+string(int64(bufSize))+" got "+string(n), err)
+ return
+ }
+
+ // Read the data back
+ r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+" got "+string(st.Size), err)
+ return
+ }
+ if err := r.Close(); err != nil {
+ logError(testName, function, args, startTime, "", "Object Close failed", err)
+ return
+ }
+ if err := r.Close(); err == nil {
+ logError(testName, function, args, startTime, "", "Already closed object. No error returned", err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
+// Test removing multiple objects with Remove API
+func testRemoveMultipleObjects() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "RemoveObjects(bucketName, objectsCh)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
+ }
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Enable tracing, write to stdout.
+ // c.TraceOn(os.Stderr)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ r := bytes.NewReader(bytes.Repeat([]byte("a"), 8))
+
+ // Multi remove of 1100 objects
+ nrObjects := 200
+
+ objectsCh := make(chan string)
+
+ go func() {
+ defer close(objectsCh)
+ // Upload objects and send them to objectsCh
+ for i := 0; i < nrObjects; i++ {
+ objectName := "sample" + strconv.Itoa(i) + ".txt"
+ _, err = c.PutObject(bucketName, objectName, r, 8, minio.PutObjectOptions{ContentType: "application/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ continue
+ }
+ objectsCh <- objectName
+ }
+ }()
+
+ // Call RemoveObjects API
+ errorCh := c.RemoveObjects(bucketName, objectsCh)
+
+ // Check if errorCh doesn't receive any error
+ select {
+ case r, more := <-errorCh:
+ if more {
+ logError(testName, function, args, startTime, "", "Unexpected error", r.Err)
+ return
+ }
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
+// Tests removing partially uploaded objects.
+func testRemovePartiallyUploaded() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "RemoveIncompleteUpload(bucketName, objectName)"
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
+ }
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Enable tracing, write to stdout.
+ // c.TraceOn(os.Stderr)
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ r := bytes.NewReader(bytes.Repeat([]byte("a"), 128*1024))
+
+ reader, writer := io.Pipe()
+ go func() {
+ i := 0
+ for i < 25 {
+ _, cerr := io.CopyN(writer, r, 128*1024)
+ if cerr != nil {
+ logError(testName, function, args, startTime, "", "Copy failed", err)
+ return
+ }
+ i++
+ r.Seek(0, 0)
+ }
+ writer.CloseWithError(errors.New("proactively closed to be verified later"))
+ }()
+
+ objectName := bucketName + "-resumable"
+ args["objectName"] = objectName
+
+ _, err = c.PutObject(bucketName, objectName, reader, 128*1024, minio.PutObjectOptions{ContentType: "application/octet-stream"})
+ if err == nil {
+ logError(testName, function, args, startTime, "", "PutObject should fail", err)
+ return
+ }
+ if !strings.Contains(err.Error(), "proactively closed to be verified later") {
+ logError(testName, function, args, startTime, "", "String not found", err)
+ return
+ }
+ err = c.RemoveIncompleteUpload(bucketName, objectName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "RemoveIncompleteUpload failed", err)
+ return
+ }
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
+// Tests FPutObject of a big file to trigger multipart
+func testFPutObjectMultipart() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "FPutObject(bucketName, objectName, fileName, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "fileName": "",
+ "opts": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ // Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload.
+ var fileName = getMintDataDirFilePath("datafile-65-MB")
+ if fileName == "" {
+ // Make a temp file with minPartSize bytes of data.
+ file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "TempFile creation failed", err)
+ return
+ }
+ // Upload 2 parts to utilize all 3 'workers' in multipart and still have a part to upload.
+ if _, err = io.Copy(file, getDataReader("datafile-65-MB")); err != nil {
+ logError(testName, function, args, startTime, "", "Copy failed", err)
+ return
+ }
+ if err = file.Close(); err != nil {
+ logError(testName, function, args, startTime, "", "File Close failed", err)
+ return
+ }
+ fileName = file.Name()
+ args["fileName"] = fileName
+ }
+ totalSize := dataFileMap["datafile-65-MB"]
+ // Set base object name
+ objectName := bucketName + "FPutObject" + "-standard"
+ args["objectName"] = objectName
+
+ objectContentType := "testapplication/octet-stream"
+ args["objectContentType"] = objectContentType
+
+ // Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
+ n, err := c.FPutObject(bucketName, objectName, fileName, minio.PutObjectOptions{ContentType: objectContentType})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FPutObject failed", err)
+ return
+ }
+ if n != int64(totalSize) {
+ logError(testName, function, args, startTime, "", "FPutObject failed", err)
+ return
+ }
+
+ r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ objInfo, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Unexpected error", err)
+ return
+ }
+ if objInfo.Size != int64(totalSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(totalSize))+" got "+string(objInfo.Size), err)
+ return
+ }
+ if objInfo.ContentType != objectContentType {
+ logError(testName, function, args, startTime, "", "ContentType doesn't match", err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
+// Tests FPutObject with null contentType (default = application/octet-stream)
+func testFPutObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "FPutObject(bucketName, objectName, fileName, opts)"
+
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "fileName": "",
+ "opts": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ location := "us-east-1"
+
+ // Make a new bucket.
+ args["bucketName"] = bucketName
+ args["location"] = location
+ function = "MakeBucket()bucketName, location"
+ err = c.MakeBucket(bucketName, location)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ // Upload 3 parts worth of data to use all 3 of multiparts 'workers' and have an extra part.
+ // Use different data in part for multipart tests to check parts are uploaded in correct order.
+ var fName = getMintDataDirFilePath("datafile-65-MB")
+ if fName == "" {
+ // Make a temp file with minPartSize bytes of data.
+ file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "TempFile creation failed", err)
+ return
+ }
+
+ // Upload 3 parts to utilize all 3 'workers' in multipart and still have a part to upload.
+ if _, err = io.Copy(file, getDataReader("datafile-65-MB")); err != nil {
+ logError(testName, function, args, startTime, "", "File copy failed", err)
+ return
+ }
+ // Close the file pro-actively for windows.
+ if err = file.Close(); err != nil {
+ logError(testName, function, args, startTime, "", "File close failed", err)
+ return
+ }
+ defer os.Remove(file.Name())
+ fName = file.Name()
+ }
+ totalSize := dataFileMap["datafile-65-MB"]
+
+ // Set base object name
+ function = "FPutObject(bucketName, objectName, fileName, opts)"
+ objectName := bucketName + "FPutObject"
+ args["objectName"] = objectName + "-standard"
+ args["fileName"] = fName
+ args["opts"] = minio.PutObjectOptions{ContentType: "application/octet-stream"}
+
+ // Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
+ n, err := c.FPutObject(bucketName, objectName+"-standard", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"})
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FPutObject failed", err)
+ return
+ }
+ if n != int64(totalSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err)
+ return
+ }
+
+ // Perform FPutObject with no contentType provided (Expecting application/octet-stream)
+ args["objectName"] = objectName + "-Octet"
+ n, err = c.FPutObject(bucketName, objectName+"-Octet", fName, minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "File close failed", err)
+ return
+ }
+ if n != int64(totalSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err)
+ return
+ }
+ srcFile, err := os.Open(fName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "File open failed", err)
+ return
+ }
+ defer srcFile.Close()
+ // Add extension to temp file name
+ tmpFile, err := os.Create(fName + ".gtar")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "File create failed", err)
+ return
+ }
+ defer tmpFile.Close()
+ _, err = io.Copy(tmpFile, srcFile)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "File copy failed", err)
+ return
+ }
+
+ // Perform FPutObject with no contentType provided (Expecting application/x-gtar)
+ args["objectName"] = objectName + "-GTar"
+ n, err = c.FPutObject(bucketName, objectName+"-GTar", fName+".gtar", minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FPutObject failed", err)
+ return
+ }
+ if n != int64(totalSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err)
+ return
+ }
+
+ // Check headers
+ function = "StatObject(bucketName, objectName, opts)"
+ args["objectName"] = objectName + "-standard"
+ rStandard, err := c.StatObject(bucketName, objectName+"-standard", minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+ if rStandard.ContentType != "application/octet-stream" {
+ logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rStandard.ContentType, err)
+ return
+ }
+
+ function = "StatObject(bucketName, objectName, opts)"
+ args["objectName"] = objectName + "-Octet"
+ rOctet, err := c.StatObject(bucketName, objectName+"-Octet", minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+ if rOctet.ContentType != "application/octet-stream" {
+ logError(testName, function, args, startTime, "", "ContentType does not match, expected application/octet-stream, got "+rStandard.ContentType, err)
+ return
+ }
+
+ function = "StatObject(bucketName, objectName, opts)"
+ args["objectName"] = objectName + "-GTar"
+ rGTar, err := c.StatObject(bucketName, objectName+"-GTar", minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+ if rGTar.ContentType != "application/x-gtar" {
+ logError(testName, function, args, startTime, "", "ContentType does not match, expected application/x-gtar, got "+rStandard.ContentType, err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ if err = os.Remove(fName + ".gtar"); err != nil {
+ logError(testName, function, args, startTime, "", "File remove failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
+// Tests FPutObjectWithContext request context cancels after timeout
+func testFPutObjectWithContext() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "FPutObject(bucketName, objectName, fileName, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "fileName": "",
+ "opts": "",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ // Upload 1 parts worth of data to use multipart upload.
+ // Use different data in part for multipart tests to check parts are uploaded in correct order.
+ var fName = getMintDataDirFilePath("datafile-1-MB")
+ if fName == "" {
+ // Make a temp file with 1 MiB bytes of data.
+ file, err := ioutil.TempFile(os.TempDir(), "FPutObjectWithContextTest")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "TempFile creation failed", err)
+ return
+ }
+
+ // Upload 1 parts to trigger multipart upload
+ if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil {
+ logError(testName, function, args, startTime, "", "File copy failed", err)
+ return
+ }
+ // Close the file pro-actively for windows.
+ if err = file.Close(); err != nil {
+ logError(testName, function, args, startTime, "", "File close failed", err)
+ return
+ }
+ defer os.Remove(file.Name())
+ fName = file.Name()
+ }
+ totalSize := dataFileMap["datafile-1-MB"]
+
+ // Set base object name
+ objectName := bucketName + "FPutObjectWithContext"
+ args["objectName"] = objectName
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+ args["ctx"] = ctx
+ defer cancel()
+
+ // Perform standard FPutObjectWithContext with contentType provided (Expecting application/octet-stream)
+ _, err = c.FPutObjectWithContext(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"})
+ if err == nil {
+ logError(testName, function, args, startTime, "", "FPutObjectWithContext should fail on short timeout", err)
+ return
+ }
+ ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+ defer cancel()
+ // Perform FPutObjectWithContext with a long timeout. Expect the put object to succeed
+ n, err := c.FPutObjectWithContext(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FPutObjectWithContext shouldn't fail on long timeout", err)
+ return
+ }
+ if n != int64(totalSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(totalSize)+", got "+string(n), err)
+ return
+ }
+
+ _, err = c.StatObject(bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+
+}
+
+// Tests FPutObjectWithContext request context cancels after timeout
+func testFPutObjectWithContextV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "FPutObjectWithContext(ctx, bucketName, objectName, fileName, opts)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "opts": "minio.PutObjectOptions{ContentType:objectContentType}",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.NewV2(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ // Upload 1 parts worth of data to use multipart upload.
+ // Use different data in part for multipart tests to check parts are uploaded in correct order.
+ var fName = getMintDataDirFilePath("datafile-1-MB")
+ if fName == "" {
+ // Make a temp file with 1 MiB bytes of data.
+ file, err := ioutil.TempFile(os.TempDir(), "FPutObjectWithContextTest")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Temp file creation failed", err)
+ return
+ }
+
+ // Upload 1 parts to trigger multipart upload
+ if _, err = io.Copy(file, getDataReader("datafile-1-MB")); err != nil {
+ logError(testName, function, args, startTime, "", "File copy failed", err)
+ return
+ }
+
+ // Close the file pro-actively for windows.
+ if err = file.Close(); err != nil {
+ logError(testName, function, args, startTime, "", "File close failed", err)
+ return
+ }
+ defer os.Remove(file.Name())
+ fName = file.Name()
+ }
+ totalSize := dataFileMap["datafile-1-MB"]
+
+ // Set base object name
+ objectName := bucketName + "FPutObjectWithContext"
+ args["objectName"] = objectName
+
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+ args["ctx"] = ctx
+ defer cancel()
+
+ // Perform standard FPutObjectWithContext with contentType provided (Expecting application/octet-stream)
+ _, err = c.FPutObjectWithContext(ctx, bucketName, objectName+"-Shorttimeout", fName, minio.PutObjectOptions{ContentType: "application/octet-stream"})
+ if err == nil {
+ logError(testName, function, args, startTime, "", "FPutObjectWithContext should fail on short timeout", err)
+ return
+ }
+ ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+ defer cancel()
+ // Perform FPutObjectWithContext with a long timeout. Expect the put object to succeed
+ n, err := c.FPutObjectWithContext(ctx, bucketName, objectName+"-Longtimeout", fName, minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FPutObjectWithContext shouldn't fail on longer timeout", err)
+ return
+ }
+ if n != int64(totalSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes does not match:wanted"+string(totalSize)+" got "+string(n), err)
+ return
+ }
+
+ _, err = c.StatObject(bucketName, objectName+"-Longtimeout", minio.StatObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "StatObject failed", err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+
+}
+
+// Test validates putObject with context to see if request cancellation is honored.
+func testPutObjectWithContext() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutObjectWithContext(ctx, bucketName, objectName, fileName, opts)"
+ args := map[string]interface{}{
+ "ctx": "",
+ "bucketName": "",
+ "objectName": "",
+ "opts": "",
+ }
+ // Instantiate new minio client object.
+ c, err := minio.NewV4(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Make a new bucket.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket call failed", err)
+ return
+ }
+ bufSize := dataFileMap["datafile-33-kB"]
+ var reader = getDataReader("datafile-33-kB")
+ defer reader.Close()
+ objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
+ args["objectName"] = objectName
+
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+ args["ctx"] = ctx
+ args["opts"] = minio.PutObjectOptions{ContentType: "binary/octet-stream"}
+ defer cancel()
+
+ _, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err == nil {
+ logError(testName, function, args, startTime, "", "PutObjectWithContext should fail on short timeout", err)
+ return
+ }
+
+ ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+ args["ctx"] = ctx
+
+ defer cancel()
+ reader = getDataReader("datafile-33-kB")
+ defer reader.Close()
+ _, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObjectWithContext with long timeout failed", err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+
+}
+
+// Tests get object ReaderSeeker interface methods.
+func testGetObjectReadSeekFunctional() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ // Generate 33K of data.
+ bufSize := dataFileMap["datafile-33-kB"]
+ var reader = getDataReader("datafile-33-kB")
+ defer reader.Close()
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ buf, err := ioutil.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+
+ // Save the data
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ if n != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err)
+ return
+ }
+
+ defer func() {
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+ }()
+
+ // Read the data back
+ r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat object failed", err)
+ return
+ }
+
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err)
+ return
+ }
+
+ // This following function helps us to compare data from the reader after seek
+ // with the data from the original buffer
+ cmpData := func(r io.Reader, start, end int) {
+ if end-start == 0 {
+ return
+ }
+ buffer := bytes.NewBuffer([]byte{})
+ if _, err := io.CopyN(buffer, r, int64(bufSize)); err != nil {
+ if err != io.EOF {
+ logError(testName, function, args, startTime, "", "CopyN failed", err)
+ return
+ }
+ }
+ if !bytes.Equal(buf[start:end], buffer.Bytes()) {
+ logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
+ return
+ }
+ }
+
+ // Generic seek error for errors other than io.EOF
+ seekErr := errors.New("seek error")
+
+ testCases := []struct {
+ offset int64
+ whence int
+ pos int64
+ err error
+ shouldCmp bool
+ start int
+ end int
+ }{
+ // Start from offset 0, fetch data and compare
+ {0, 0, 0, nil, true, 0, 0},
+ // Start from offset 2048, fetch data and compare
+ {2048, 0, 2048, nil, true, 2048, bufSize},
+ // Start from offset larger than possible
+ {int64(bufSize) + 1024, 0, 0, seekErr, false, 0, 0},
+ // Move to offset 0 without comparing
+ {0, 0, 0, nil, false, 0, 0},
+ // Move one step forward and compare
+ {1, 1, 1, nil, true, 1, bufSize},
+ // Move larger than possible
+ {int64(bufSize), 1, 0, seekErr, false, 0, 0},
+ // Provide negative offset with CUR_SEEK
+ {int64(-1), 1, 0, seekErr, false, 0, 0},
+ // Test with whence SEEK_END and with positive offset
+ {1024, 2, int64(bufSize) - 1024, io.EOF, true, 0, 0},
+ // Test with whence SEEK_END and with negative offset
+ {-1024, 2, int64(bufSize) - 1024, nil, true, bufSize - 1024, bufSize},
+ // Test with whence SEEK_END and with large negative offset
+ {-int64(bufSize) * 2, 2, 0, seekErr, true, 0, 0},
+ }
+
+ for i, testCase := range testCases {
+ // Perform seek operation
+ n, err := r.Seek(testCase.offset, testCase.whence)
+ // We expect an error
+ if testCase.err == seekErr && err == nil {
+ logError(testName, function, args, startTime, "", "Test "+string(i+1)+", unexpected err value: expected: "+testCase.err.Error()+", found: "+err.Error(), err)
+ return
+ }
+ // We expect a specific error
+ if testCase.err != seekErr && testCase.err != err {
+ logError(testName, function, args, startTime, "", "Test "+string(i+1)+", unexpected err value: expected: "+testCase.err.Error()+", found: "+err.Error(), err)
+ return
+ }
+ // If we expect an error go to the next loop
+ if testCase.err != nil {
+ continue
+ }
+ // Check the returned seek pos
+ if n != testCase.pos {
+ logError(testName, function, args, startTime, "", "Test "+string(i+1)+", number of bytes seeked does not match, expected "+string(testCase.pos)+", got "+string(n), err)
+ return
+ }
+ // Compare only if shouldCmp is activated
+ if testCase.shouldCmp {
+ cmpData(r, testCase.start, testCase.end)
+ }
+ }
+ successLogger(testName, function, args, startTime).Info()
+}
+
+// Tests get object ReaderAt interface methods.
+func testGetObjectReadAtFunctional() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.New(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ // Generate 33K of data.
+ bufSize := dataFileMap["datafile-33-kB"]
+ var reader = getDataReader("datafile-33-kB")
+ defer reader.Close()
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ buf, err := ioutil.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+
+ // Save the data
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ if n != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err)
+ return
+ }
+
+ // read the data back
+ r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ offset := int64(2048)
+
+ // read directly
+ buf1 := make([]byte, 512)
+ buf2 := make([]byte, 512)
+ buf3 := make([]byte, 512)
+ buf4 := make([]byte, 512)
+
+ // Test readAt before stat is called.
+ m, err := r.ReadAt(buf1, offset)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ if m != len(buf1) {
+ logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf1))+", got "+string(m), err)
+ return
+ }
+ if !bytes.Equal(buf1, buf[offset:offset+512]) {
+ logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+ return
+ }
+ offset += 512
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(int64(bufSize))+", got "+string(st.Size), err)
+ return
+ }
+
+ m, err = r.ReadAt(buf2, offset)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ if m != len(buf2) {
+ logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf2))+", got "+string(m), err)
+ return
+ }
+ if !bytes.Equal(buf2, buf[offset:offset+512]) {
+ logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+ return
+ }
+ offset += 512
+ m, err = r.ReadAt(buf3, offset)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ if m != len(buf3) {
+ logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf3))+", got "+string(m), err)
+ return
+ }
+ if !bytes.Equal(buf3, buf[offset:offset+512]) {
+ logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+ return
+ }
+ offset += 512
+ m, err = r.ReadAt(buf4, offset)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ if m != len(buf4) {
+ logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf4))+", got "+string(m), err)
+ return
+ }
+ if !bytes.Equal(buf4, buf[offset:offset+512]) {
+ logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+ return
+ }
+
+ buf5 := make([]byte, n)
+ // Read the whole object.
+ m, err = r.ReadAt(buf5, 0)
+ if err != nil {
+ if err != io.EOF {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ }
+ if m != len(buf5) {
+ logError(testName, function, args, startTime, "", "ReadAt read shorter bytes before reaching EOF, expected "+string(len(buf5))+", got "+string(m), err)
+ return
+ }
+ if !bytes.Equal(buf, buf5) {
+ logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err)
+ return
+ }
+
+ buf6 := make([]byte, n+1)
+ // Read the whole object and beyond.
+ _, err = r.ReadAt(buf6, 0)
+ if err != nil {
+ if err != io.EOF {
+ logError(testName, function, args, startTime, "", "ReadAt failed", err)
+ return
+ }
+ }
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+ successLogger(testName, function, args, startTime).Info()
+}
+
+// Test Presigned Post Policy
+func testPresignedPostPolicy() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PresignedPostPolicy(policy)"
+ args := map[string]interface{}{
+ "policy": "",
+ }
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object
+ c, err := minio.NewV4(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ // Generate 33K of data.
+ bufSize := dataFileMap["datafile-33-kB"]
+ var reader = getDataReader("datafile-33-kB")
+ defer reader.Close()
+
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ metadataKey := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ metadataValue := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+
+ buf, err := ioutil.ReadAll(reader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+
+ // Save the data
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ if n != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+" got "+string(n), err)
+ return
+ }
+
+ policy := minio.NewPostPolicy()
+
+ if err := policy.SetBucket(""); err == nil {
+ logError(testName, function, args, startTime, "", "SetBucket did not fail for invalid conditions", err)
+ return
+ }
+ if err := policy.SetKey(""); err == nil {
+ logError(testName, function, args, startTime, "", "SetKey did not fail for invalid conditions", err)
+ return
+ }
+ if err := policy.SetKeyStartsWith(""); err == nil {
+ logError(testName, function, args, startTime, "", "SetKeyStartsWith did not fail for invalid conditions", err)
+ return
+ }
+ if err := policy.SetExpires(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC)); err == nil {
+ logError(testName, function, args, startTime, "", "SetExpires did not fail for invalid conditions", err)
+ return
+ }
+ if err := policy.SetContentType(""); err == nil {
+ logError(testName, function, args, startTime, "", "SetContentType did not fail for invalid conditions", err)
+ return
+ }
+ if err := policy.SetContentLengthRange(1024*1024, 1024); err == nil {
+ logError(testName, function, args, startTime, "", "SetContentLengthRange did not fail for invalid conditions", err)
+ return
+ }
+ if err := policy.SetUserMetadata("", ""); err == nil {
+ logError(testName, function, args, startTime, "", "SetUserMetadata did not fail for invalid conditions", err)
+ return
+ }
+
+ policy.SetBucket(bucketName)
+ policy.SetKey(objectName)
+ policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
+ policy.SetContentType("binary/octet-stream")
+ policy.SetContentLengthRange(10, 1024*1024)
+ policy.SetUserMetadata(metadataKey, metadataValue)
+ args["policy"] = policy.String()
+
+ presignedPostPolicyURL, formData, err := c.PresignedPostPolicy(policy)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedPostPolicy failed", err)
+ return
+ }
+
+ var formBuf bytes.Buffer
+ writer := multipart.NewWriter(&formBuf)
+ for k, v := range formData {
+ writer.WriteField(k, v)
+ }
+
+ // Get a 33KB file to upload and test if set post policy works
+ var filePath = getMintDataDirFilePath("datafile-33-kB")
+ if filePath == "" {
+ // Make a temp file with 33 KB data.
+ file, err := ioutil.TempFile(os.TempDir(), "PresignedPostPolicyTest")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "TempFile creation failed", err)
+ return
+ }
+ if _, err = io.Copy(file, getDataReader("datafile-33-kB")); err != nil {
+ logError(testName, function, args, startTime, "", "Copy failed", err)
+ return
+ }
+ if err = file.Close(); err != nil {
+ logError(testName, function, args, startTime, "", "File Close failed", err)
+ return
+ }
+ filePath = file.Name()
+ }
+
+ // add file to post request
+ f, err := os.Open(filePath)
+ defer f.Close()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "File open failed", err)
+ return
+ }
+ w, err := writer.CreateFormFile("file", filePath)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CreateFormFile failed", err)
+ return
+ }
+
+ _, err = io.Copy(w, f)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Copy failed", err)
+ return
+ }
+ writer.Close()
+
+ // make post request with correct form data
+ res, err := http.Post(presignedPostPolicyURL.String(), writer.FormDataContentType(), bytes.NewReader(formBuf.Bytes()))
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Http request failed", err)
+ return
+ }
+ defer res.Body.Close()
+ if res.StatusCode != http.StatusNoContent {
+ logError(testName, function, args, startTime, "", "Http request failed", errors.New(res.Status))
+ return
+ }
+
+ // expected path should be absolute path of the object
+ var scheme string
+ if mustParseBool(os.Getenv(enableHTTPS)) {
+ scheme = "https://"
+ } else {
+ scheme = "http://"
+ }
+
+ expectedLocation := scheme + os.Getenv(serverEndpoint) + "/" + bucketName + "/" + objectName
+
+ if val, ok := res.Header["Location"]; ok {
+ if val[0] != expectedLocation {
+ logError(testName, function, args, startTime, "", "Location in header response is incorrect", err)
+ return
+ }
+ } else {
+ logError(testName, function, args, startTime, "", "Location not found in header response", err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
+// Tests copy object
+func testCopyObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "CopyObject(dst, src)"
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object
+ c, err := minio.NewV4(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ // Make a new bucket in 'us-east-1' (source bucket).
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ // Make a new bucket in 'us-east-1' (destination bucket).
+ err = c.MakeBucket(bucketName+"-copy", "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ // Generate 33K of data.
+ bufSize := dataFileMap["datafile-33-kB"]
+ var reader = getDataReader("datafile-33-kB")
+
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ if n != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(int64(bufSize))+", got "+string(n), err)
+ return
+ }
+
+ r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ // Check the various fields of source object against destination object.
+ objInfo, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+
+ // Copy Source
+ src := minio.NewSourceInfo(bucketName, objectName, nil)
+ args["src"] = src
+
+ // Set copy conditions.
+
+ // All invalid conditions first.
+ err = src.SetModifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
+ if err == nil {
+ logError(testName, function, args, startTime, "", "SetModifiedSinceCond did not fail for invalid conditions", err)
+ return
+ }
+ err = src.SetUnmodifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
+ if err == nil {
+ logError(testName, function, args, startTime, "", "SetUnmodifiedSinceCond did not fail for invalid conditions", err)
+ return
+ }
+ err = src.SetMatchETagCond("")
+ if err == nil {
+ logError(testName, function, args, startTime, "", "SetMatchETagCond did not fail for invalid conditions", err)
+ return
+ }
+ err = src.SetMatchETagExceptCond("")
+ if err == nil {
+ logError(testName, function, args, startTime, "", "SetMatchETagExceptCond did not fail for invalid conditions", err)
+ return
+ }
+
+ err = src.SetModifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+ if err != nil {
+ logError(testName, function, args, startTime, "", "SetModifiedSinceCond failed", err)
+ return
+ }
+ err = src.SetMatchETagCond(objInfo.ETag)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "SetMatchETagCond failed", err)
+ return
+ }
+
+ dst, err := minio.NewDestinationInfo(bucketName+"-copy", objectName+"-copy", nil, nil)
+ args["dst"] = dst
+ if err != nil {
+ logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
+ return
+ }
+
+ // Perform the Copy
+ err = c.CopyObject(dst, src)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "CopyObject failed", err)
+ return
+ }
+
+ // Source object
+ r, err = c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ // Destination object
+ readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+ // Check the various fields of source object against destination object.
+ objInfo, err = r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+ objInfoCopy, err := readerCopy.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Stat failed", err)
+ return
+ }
+ if objInfo.Size != objInfoCopy.Size {
+ logError(testName, function, args, startTime, "", "Number of bytes does not match, expected "+string(objInfoCopy.Size)+", got "+string(objInfo.Size), err)
+ return
+ }
+
+ // CopyObject again but with wrong conditions
+ src = minio.NewSourceInfo(bucketName, objectName, nil)
+ err = src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+ if err != nil {
+ logError(testName, function, args, startTime, "", "SetUnmodifiedSinceCond failed", err)
+ return
+ }
+ err = src.SetMatchETagExceptCond(objInfo.ETag)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "SetMatchETagExceptCond failed", err)
+ return
+ }
+
+ // Perform the Copy which should fail
+ err = c.CopyObject(dst, src)
+ if err == nil {
+ logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+ if err = cleanupBucket(bucketName+"-copy", c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+ successLogger(testName, function, args, startTime).Info()
+}
+
+// TestEncryptionPutGet tests client side encryption
+func testEncryptionPutGet() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutEncryptedObject(bucketName, objectName, reader, cbcMaterials, metadata, progress)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "cbcMaterials": "",
+ "metadata": "",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object
+ c, err := minio.NewV4(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ // Generate a symmetric key
+ symKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00"))
+
+ // Generate an assymmetric key from predefine public and private certificates
+ privateKey, err := hex.DecodeString(
+ "30820277020100300d06092a864886f70d0101010500048202613082025d" +
+ "0201000281810087b42ea73243a3576dc4c0b6fa245d339582dfdbddc20c" +
+ "bb8ab666385034d997210c54ba79275c51162a1221c3fb1a4c7c61131ca6" +
+ "5563b319d83474ef5e803fbfa7e52b889e1893b02586b724250de7ac6351" +
+ "cc0b7c638c980acec0a07020a78eed7eaa471eca4b92071394e061346c06" +
+ "15ccce2f465dee2080a89e43f29b5702030100010281801dd5770c3af8b3" +
+ "c85cd18cacad81a11bde1acfac3eac92b00866e142301fee565365aa9af4" +
+ "57baebf8bb7711054d071319a51dd6869aef3848ce477a0dc5f0dbc0c336" +
+ "5814b24c820491ae2bb3c707229a654427e03307fec683e6b27856688f08" +
+ "bdaa88054c5eeeb773793ff7543ee0fb0e2ad716856f2777f809ef7e6fa4" +
+ "41024100ca6b1edf89e8a8f93cce4b98c76c6990a09eb0d32ad9d3d04fbf" +
+ "0b026fa935c44f0a1c05dd96df192143b7bda8b110ec8ace28927181fd8c" +
+ "d2f17330b9b63535024100aba0260afb41489451baaeba423bee39bcbd1e" +
+ "f63dd44ee2d466d2453e683bf46d019a8baead3a2c7fca987988eb4d565e" +
+ "27d6be34605953f5034e4faeec9bdb0241009db2cb00b8be8c36710aff96" +
+ "6d77a6dec86419baca9d9e09a2b761ea69f7d82db2ae5b9aae4246599bb2" +
+ "d849684d5ab40e8802cfe4a2b358ad56f2b939561d2902404e0ead9ecafd" +
+ "bb33f22414fa13cbcc22a86bdf9c212ce1a01af894e3f76952f36d6c904c" +
+ "bd6a7e0de52550c9ddf31f1e8bfe5495f79e66a25fca5c20b3af5b870241" +
+ "0083456232aa58a8c45e5b110494599bda8dbe6a094683a0539ddd24e19d" +
+ "47684263bbe285ad953d725942d670b8f290d50c0bca3d1dc9688569f1d5" +
+ "9945cb5c7d")
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "DecodeString for symmetric Key generation failed", err)
+ return
+ }
+
+ publicKey, err := hex.DecodeString("30819f300d06092a864886f70d010101050003818d003081890281810087" +
+ "b42ea73243a3576dc4c0b6fa245d339582dfdbddc20cbb8ab666385034d9" +
+ "97210c54ba79275c51162a1221c3fb1a4c7c61131ca65563b319d83474ef" +
+ "5e803fbfa7e52b889e1893b02586b724250de7ac6351cc0b7c638c980ace" +
+ "c0a07020a78eed7eaa471eca4b92071394e061346c0615ccce2f465dee20" +
+ "80a89e43f29b570203010001")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "DecodeString for symmetric Key generation failed", err)
+ return
+ }
+
+ // Generate an asymmetric key
+ asymKey, err := encrypt.NewAsymmetricKey(privateKey, publicKey)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "NewAsymmetricKey for symmetric Key generation failed", err)
+ return
+ }
+
+ testCases := []struct {
+ buf []byte
+ encKey encrypt.Key
+ }{
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 0)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 15)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 16)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 17)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 31)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 32)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 33)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*2)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*1024)},
+
+ {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 0)},
+ {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1)},
+ {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 16)},
+ {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 32)},
+ {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024)},
+ {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024*1024)},
+ }
+
+ for i, testCase := range testCases {
+ // Generate a random object name
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ // Secured object
+ cbcMaterials, err := encrypt.NewCBCSecureMaterials(testCase.encKey)
+ args["cbcMaterials"] = cbcMaterials
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "NewCBCSecureMaterials failed", err)
+ return
+ }
+
+ // Put encrypted data
+ _, err = c.PutEncryptedObject(bucketName, objectName, bytes.NewReader(testCase.buf), cbcMaterials)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutEncryptedObject failed", err)
+ return
+ }
+
+ // Read the data back
+ r, err := c.GetEncryptedObject(bucketName, objectName, cbcMaterials)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err)
+ return
+ }
+ defer r.Close()
+
+ // Compare the sent object with the received one
+ recvBuffer := bytes.NewBuffer([]byte{})
+ if _, err = io.Copy(recvBuffer, r); err != nil {
+ logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err)
+ return
+ }
+ if recvBuffer.Len() != len(testCase.buf) {
+ logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err)
+ return
+ }
+ if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) {
+ logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
+// TestEncryptionFPut tests client side encryption
+func testEncryptionFPut() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "FPutEncryptedObject(bucketName, objectName, filePath, contentType, cbcMaterials)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "filePath": "",
+ "contentType": "",
+ "cbcMaterials": "",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object
+ c, err := minio.NewV4(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ // Generate a symmetric key
+ symKey := encrypt.NewSymmetricKey([]byte("my-secret-key-00"))
+
+ // Generate an assymmetric key from predefine public and private certificates
+ privateKey, err := hex.DecodeString(
+ "30820277020100300d06092a864886f70d0101010500048202613082025d" +
+ "0201000281810087b42ea73243a3576dc4c0b6fa245d339582dfdbddc20c" +
+ "bb8ab666385034d997210c54ba79275c51162a1221c3fb1a4c7c61131ca6" +
+ "5563b319d83474ef5e803fbfa7e52b889e1893b02586b724250de7ac6351" +
+ "cc0b7c638c980acec0a07020a78eed7eaa471eca4b92071394e061346c06" +
+ "15ccce2f465dee2080a89e43f29b5702030100010281801dd5770c3af8b3" +
+ "c85cd18cacad81a11bde1acfac3eac92b00866e142301fee565365aa9af4" +
+ "57baebf8bb7711054d071319a51dd6869aef3848ce477a0dc5f0dbc0c336" +
+ "5814b24c820491ae2bb3c707229a654427e03307fec683e6b27856688f08" +
+ "bdaa88054c5eeeb773793ff7543ee0fb0e2ad716856f2777f809ef7e6fa4" +
+ "41024100ca6b1edf89e8a8f93cce4b98c76c6990a09eb0d32ad9d3d04fbf" +
+ "0b026fa935c44f0a1c05dd96df192143b7bda8b110ec8ace28927181fd8c" +
+ "d2f17330b9b63535024100aba0260afb41489451baaeba423bee39bcbd1e" +
+ "f63dd44ee2d466d2453e683bf46d019a8baead3a2c7fca987988eb4d565e" +
+ "27d6be34605953f5034e4faeec9bdb0241009db2cb00b8be8c36710aff96" +
+ "6d77a6dec86419baca9d9e09a2b761ea69f7d82db2ae5b9aae4246599bb2" +
+ "d849684d5ab40e8802cfe4a2b358ad56f2b939561d2902404e0ead9ecafd" +
+ "bb33f22414fa13cbcc22a86bdf9c212ce1a01af894e3f76952f36d6c904c" +
+ "bd6a7e0de52550c9ddf31f1e8bfe5495f79e66a25fca5c20b3af5b870241" +
+ "0083456232aa58a8c45e5b110494599bda8dbe6a094683a0539ddd24e19d" +
+ "47684263bbe285ad953d725942d670b8f290d50c0bca3d1dc9688569f1d5" +
+ "9945cb5c7d")
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "DecodeString for symmetric Key generation failed", err)
+ return
+ }
+
+ publicKey, err := hex.DecodeString("30819f300d06092a864886f70d010101050003818d003081890281810087" +
+ "b42ea73243a3576dc4c0b6fa245d339582dfdbddc20cbb8ab666385034d9" +
+ "97210c54ba79275c51162a1221c3fb1a4c7c61131ca65563b319d83474ef" +
+ "5e803fbfa7e52b889e1893b02586b724250de7ac6351cc0b7c638c980ace" +
+ "c0a07020a78eed7eaa471eca4b92071394e061346c0615ccce2f465dee20" +
+ "80a89e43f29b570203010001")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "DecodeString for symmetric Key generation failed", err)
+ return
+ }
+
+ // Generate an asymmetric key
+ asymKey, err := encrypt.NewAsymmetricKey(privateKey, publicKey)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "NewAsymmetricKey for symmetric Key generation failed", err)
+ return
+ }
+
+ // Object custom metadata
+ customContentType := "custom/contenttype"
+ args["metadata"] = customContentType
+
+ testCases := []struct {
+ buf []byte
+ encKey encrypt.Key
+ }{
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 0)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 15)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 16)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 17)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 31)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 32)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 33)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*2)},
+ {encKey: symKey, buf: bytes.Repeat([]byte("F"), 1024*1024)},
+
+ {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 0)},
+ {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1)},
+ {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 16)},
+ {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 32)},
+ {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024)},
+ {encKey: asymKey, buf: bytes.Repeat([]byte("F"), 1024*1024)},
+ }
+
+ for i, testCase := range testCases {
+ // Generate a random object name
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ // Secured object
+ cbcMaterials, err := encrypt.NewCBCSecureMaterials(testCase.encKey)
+ args["cbcMaterials"] = cbcMaterials
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "NewCBCSecureMaterials failed", err)
+ return
+ }
+ // Generate a random file name.
+ fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ file, err := os.Create(fileName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "file create failed", err)
+ return
+ }
+ _, err = file.Write(testCase.buf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "file write failed", err)
+ return
+ }
+ file.Close()
+ // Put encrypted data
+ if _, err = c.FPutEncryptedObject(bucketName, objectName, fileName, cbcMaterials); err != nil {
+ logError(testName, function, args, startTime, "", "FPutEncryptedObject failed", err)
+ return
+ }
+
+ // Read the data back
+ r, err := c.GetEncryptedObject(bucketName, objectName, cbcMaterials)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetEncryptedObject failed", err)
+ return
+ }
+ defer r.Close()
+
+ // Compare the sent object with the received one
+ recvBuffer := bytes.NewBuffer([]byte{})
+ if _, err = io.Copy(recvBuffer, r); err != nil {
+ logError(testName, function, args, startTime, "", "Test "+string(i+1)+", error: "+err.Error(), err)
+ return
+ }
+ if recvBuffer.Len() != len(testCase.buf) {
+ logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Number of bytes of received object does not match, expected "+string(len(testCase.buf))+", got "+string(recvBuffer.Len()), err)
+ return
+ }
+ if !bytes.Equal(testCase.buf, recvBuffer.Bytes()) {
+ logError(testName, function, args, startTime, "", "Test "+string(i+1)+", Encrypted sent is not equal to decrypted, expected "+string(testCase.buf)+", got "+string(recvBuffer.Bytes()), err)
+ return
+ }
+
+ if err = os.Remove(fileName); err != nil {
+ logError(testName, function, args, startTime, "", "File remove failed", err)
+ return
+ }
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
// testBucketNotification exercises SetBucketNotification,
// GetBucketNotification and RemoveAllBucketNotification against a
// pre-configured bucket. The test is skipped unless all NOTIFY_*
// environment variables are set.
func testBucketNotification() {
	// initialize logging params
	startTime := time.Now()
	testName := getFuncName()
	function := "SetBucketNotification(bucketName)"
	args := map[string]interface{}{
		"bucketName": "",
	}

	// The notification target (bucket plus the pieces of a topic ARN)
	// must come from the environment; without them there is nothing
	// meaningful to test.
	if os.Getenv("NOTIFY_BUCKET") == "" ||
		os.Getenv("NOTIFY_SERVICE") == "" ||
		os.Getenv("NOTIFY_REGION") == "" ||
		os.Getenv("NOTIFY_ACCOUNTID") == "" ||
		os.Getenv("NOTIFY_RESOURCE") == "" {
		ignoredLog(testName, function, args, startTime, "Skipped notification test as it is not configured").Info()
		return
	}

	// Seed random based on current time.
	rand.Seed(time.Now().Unix())

	c, err := minio.New(
		os.Getenv(serverEndpoint),
		os.Getenv(accessKey),
		os.Getenv(secretKey),
		mustParseBool(os.Getenv(enableHTTPS)),
	)
	if err != nil {
		logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
		return
	}

	// Enable to debug
	// c.TraceOn(os.Stderr)

	// Set user agent.
	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")

	bucketName := os.Getenv("NOTIFY_BUCKET")
	args["bucketName"] = bucketName

	// Build one real ARN from the environment and one dummy queue ARN
	// that is only added and removed again further below.
	topicArn := minio.NewArn("aws", os.Getenv("NOTIFY_SERVICE"), os.Getenv("NOTIFY_REGION"), os.Getenv("NOTIFY_ACCOUNTID"), os.Getenv("NOTIFY_RESOURCE"))
	queueArn := minio.NewArn("aws", "dummy-service", "dummy-region", "dummy-accountid", "dummy-resource")

	topicConfig := minio.NewNotificationConfig(topicArn)

	topicConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll)
	topicConfig.AddFilterSuffix("jpg")

	queueConfig := minio.NewNotificationConfig(queueArn)
	queueConfig.AddEvents(minio.ObjectCreatedAll)
	queueConfig.AddFilterPrefix("photos/")

	bNotification := minio.BucketNotification{}
	bNotification.AddTopic(topicConfig)

	// Add the same topicConfig again, should have no effect
	// because it is duplicated
	bNotification.AddTopic(topicConfig)
	if len(bNotification.TopicConfigs) != 1 {
		logError(testName, function, args, startTime, "", "Duplicate entry added", err)
		return
	}

	// Add and remove a queue config
	bNotification.AddQueue(queueConfig)
	bNotification.RemoveQueueByArn(queueArn)

	err = c.SetBucketNotification(bucketName, bNotification)
	if err != nil {
		logError(testName, function, args, startTime, "", "SetBucketNotification failed", err)
		return
	}

	// Read the configuration back and verify the topic config (and its
	// "jpg" suffix filter) survived the round trip to the server.
	bNotification, err = c.GetBucketNotification(bucketName)
	if err != nil {
		logError(testName, function, args, startTime, "", "GetBucketNotification failed", err)
		return
	}

	if len(bNotification.TopicConfigs) != 1 {
		logError(testName, function, args, startTime, "", "Topic config is empty", err)
		return
	}

	if bNotification.TopicConfigs[0].Filter.S3Key.FilterRules[0].Value != "jpg" {
		logError(testName, function, args, startTime, "", "Couldn't get the suffix", err)
		return
	}

	err = c.RemoveAllBucketNotification(bucketName)
	if err != nil {
		logError(testName, function, args, startTime, "", "RemoveAllBucketNotification failed", err)
		return
	}

	// Delete all objects and buckets
	if err = cleanupBucket(bucketName, c); err != nil {
		logError(testName, function, args, startTime, "", "Cleanup failed", err)
		return
	}

	successLogger(testName, function, args, startTime).Info()
}
+
+// Tests comprehensive list of all methods.
+func testFunctional() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "testFunctional()"
+ function_all := ""
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ c, err := minio.New(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, nil, startTime, "", "Minio client object creation failed", err)
+ return
+ }
+
+ // Enable to debug
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+ // Make a new bucket.
+ function = "MakeBucket(bucketName, region)"
+ function_all = "MakeBucket(bucketName, region)"
+ args["bucketName"] = bucketName
+ err = c.MakeBucket(bucketName, "us-east-1")
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ // Generate a random file name.
+ fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ file, err := os.Create(fileName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "File creation failed", err)
+ return
+ }
+ for i := 0; i < 3; i++ {
+ buf := make([]byte, rand.Intn(1<<19))
+ _, err = file.Write(buf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "File write failed", err)
+ return
+ }
+ }
+ file.Close()
+
+ // Verify if bucket exits and you have access.
+ var exists bool
+ function = "BucketExists(bucketName)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ }
+ exists, err = c.BucketExists(bucketName)
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "BucketExists failed", err)
+ return
+ }
+ if !exists {
+ logError(testName, function, args, startTime, "", "Could not find the bucket", err)
+ return
+ }
+
+ // Asserting the default bucket policy.
+ function = "GetBucketPolicy(bucketName, objectPrefix)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectPrefix": "",
+ }
+ policyAccess, err := c.GetBucketPolicy(bucketName, "")
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err)
+ return
+ }
+ if policyAccess != "none" {
+ logError(testName, function, args, startTime, "", "policy should be set to none", err)
+ return
+ }
+
+ // Set the bucket policy to 'public readonly'.
+ function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectPrefix": "",
+ "bucketPolicy": policy.BucketPolicyReadOnly,
+ }
+ err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadOnly)
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
+ return
+ }
+ // should return policy `readonly`.
+ function = "GetBucketPolicy(bucketName, objectPrefix)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectPrefix": "",
+ }
+ policyAccess, err = c.GetBucketPolicy(bucketName, "")
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err)
+ return
+ }
+ if policyAccess != "readonly" {
+ logError(testName, function, args, startTime, "", "policy should be set to readonly", err)
+ return
+ }
+
+ // Make the bucket 'public writeonly'.
+ function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectPrefix": "",
+ "bucketPolicy": policy.BucketPolicyWriteOnly,
+ }
+ err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyWriteOnly)
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
+ return
+ }
+ // should return policy `writeonly`.
+ function = "GetBucketPolicy(bucketName, objectPrefix)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectPrefix": "",
+ }
+ policyAccess, err = c.GetBucketPolicy(bucketName, "")
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err)
+ return
+ }
+ if policyAccess != "writeonly" {
+ logError(testName, function, args, startTime, "", "policy should be set to writeonly", err)
+ return
+ }
+ // Make the bucket 'public read/write'.
+ function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectPrefix": "",
+ "bucketPolicy": policy.BucketPolicyReadWrite,
+ }
+ err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadWrite)
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
+ return
+ }
+ // should return policy `readwrite`.
+ function = "GetBucketPolicy(bucketName, objectPrefix)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectPrefix": "",
+ }
+ policyAccess, err = c.GetBucketPolicy(bucketName, "")
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetBucketPolicy failed", err)
+ return
+ }
+ if policyAccess != "readwrite" {
+ logError(testName, function, args, startTime, "", "policy should be set to readwrite", err)
+ return
+ }
+ // List all buckets.
+ function = "ListBuckets()"
+ function_all += ", " + function
+ args = nil
+ buckets, err := c.ListBuckets()
+
+ if len(buckets) == 0 {
+ logError(testName, function, args, startTime, "", "Found bucket list to be empty", err)
+ return
+ }
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ListBuckets failed", err)
+ return
+ }
+
+ // Verify if previously created bucket is listed in list buckets.
+ bucketFound := false
+ for _, bucket := range buckets {
+ if bucket.Name == bucketName {
+ bucketFound = true
+ }
+ }
+
+ // If bucket not found error out.
+ if !bucketFound {
+ logError(testName, function, args, startTime, "", "Bucket: "+bucketName+" not found", err)
+ return
+ }
+
+ objectName := bucketName + "unique"
+
+ // Generate data
+ buf := bytes.Repeat([]byte("f"), 1<<19)
+
+ function = "PutObject(bucketName, objectName, reader, contentType)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "contentType": "",
+ }
+
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ if n != int64(len(buf)) {
+ logError(testName, function, args, startTime, "", "Length doesn't match, expected "+string(int64(len(buf)))+" got "+string(n), err)
+ return
+ }
+
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName + "-nolength",
+ "contentType": "binary/octet-stream",
+ }
+
+ n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ if n != int64(len(buf)) {
+ logError(testName, function, args, startTime, "", "Length doesn't match, expected "+string(int64(len(buf)))+" got "+string(n), err)
+ return
+ }
+
+ // Instantiate a done channel to close all listing.
+ doneCh := make(chan struct{})
+ defer close(doneCh)
+
+ objFound := false
+ isRecursive := true // Recursive is true.
+
+ function = "ListObjects(bucketName, objectName, isRecursive, doneCh)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "isRecursive": isRecursive,
+ }
+
+ for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) {
+ if obj.Key == objectName {
+ objFound = true
+ break
+ }
+ }
+ if !objFound {
+ logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err)
+ return
+ }
+
+ objFound = false
+ isRecursive = true // Recursive is true.
+ function = "ListObjectsV2(bucketName, objectName, isRecursive, doneCh)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "isRecursive": isRecursive,
+ }
+
+ for obj := range c.ListObjectsV2(bucketName, objectName, isRecursive, doneCh) {
+ if obj.Key == objectName {
+ objFound = true
+ break
+ }
+ }
+ if !objFound {
+ logError(testName, function, args, startTime, "", "Object "+objectName+" not found", err)
+ return
+ }
+
+ incompObjNotFound := true
+
+ function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "isRecursive": isRecursive,
+ }
+
+ for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) {
+ if objIncompl.Key != "" {
+ incompObjNotFound = false
+ break
+ }
+ }
+ if !incompObjNotFound {
+ logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err)
+ return
+ }
+
+ function = "GetObject(bucketName, objectName)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ }
+ newReader, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ newReadBytes, err := ioutil.ReadAll(newReader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+
+ if !bytes.Equal(newReadBytes, buf) {
+ logError(testName, function, args, startTime, "", "GetObject bytes mismatch", err)
+ return
+ }
+
+ function = "FGetObject(bucketName, objectName, fileName)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "fileName": fileName + "-f",
+ }
+ err = c.FGetObject(bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FGetObject failed", err)
+ return
+ }
+
+ function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": "",
+ "expires": 3600 * time.Second,
+ }
+ if _, err = c.PresignedHeadObject(bucketName, "", 3600*time.Second, nil); err == nil {
+ logError(testName, function, args, startTime, "", "PresignedHeadObject success", err)
+ return
+ }
+
+ // Generate presigned HEAD object url.
+ function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "expires": 3600 * time.Second,
+ }
+ presignedHeadURL, err := c.PresignedHeadObject(bucketName, objectName, 3600*time.Second, nil)
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err)
+ return
+ }
+ // Verify if presigned url works.
+ resp, err := http.Head(presignedHeadURL.String())
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err)
+ return
+ }
+ if resp.StatusCode != http.StatusOK {
+ logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect, status "+string(resp.StatusCode), err)
+ return
+ }
+ if resp.Header.Get("ETag") == "" {
+ logError(testName, function, args, startTime, "", "PresignedHeadObject response incorrect", err)
+ return
+ }
+ resp.Body.Close()
+
+ function = "PresignedGetObject(bucketName, objectName, expires, reqParams)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": "",
+ "expires": 3600 * time.Second,
+ }
+ _, err = c.PresignedGetObject(bucketName, "", 3600*time.Second, nil)
+ if err == nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject success", err)
+ return
+ }
+
+ // Generate presigned GET object url.
+ function = "PresignedGetObject(bucketName, objectName, expires, reqParams)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "expires": 3600 * time.Second,
+ }
+ presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil)
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
+ return
+ }
+
+ // Verify if presigned url works.
+ resp, err = http.Get(presignedGetURL.String())
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+ return
+ }
+ if resp.StatusCode != http.StatusOK {
+ logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err)
+ return
+ }
+ newPresignedBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+ return
+ }
+ resp.Body.Close()
+ if !bytes.Equal(newPresignedBytes, buf) {
+ logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+ return
+ }
+
+ // Set request parameters.
+ reqParams := make(url.Values)
+ reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "expires": 3600 * time.Second,
+ "reqParams": reqParams,
+ }
+ presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams)
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
+ return
+ }
+ // Verify if presigned url works.
+ resp, err = http.Get(presignedGetURL.String())
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+ return
+ }
+ if resp.StatusCode != http.StatusOK {
+ logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect, status "+string(resp.StatusCode), err)
+ return
+ }
+ newPresignedBytes, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject response incorrect", err)
+ return
+ }
+ if !bytes.Equal(newPresignedBytes, buf) {
+ logError(testName, function, args, startTime, "", "Bytes mismatch for presigned GET URL", err)
+ return
+ }
+ if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" {
+ logError(testName, function, args, startTime, "", "wrong Content-Disposition received "+string(resp.Header.Get("Content-Disposition")), err)
+ return
+ }
+
+ function = "PresignedPutObject(bucketName, objectName, expires)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": "",
+ "expires": 3600 * time.Second,
+ }
+ _, err = c.PresignedPutObject(bucketName, "", 3600*time.Second)
+ if err == nil {
+ logError(testName, function, args, startTime, "", "PresignedPutObject success", err)
+ return
+ }
+
+ function = "PresignedPutObject(bucketName, objectName, expires)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName + "-presigned",
+ "expires": 3600 * time.Second,
+ }
+ presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second)
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedPutObject failed", err)
+ return
+ }
+
+ buf = bytes.Repeat([]byte("g"), 1<<19)
+
+ req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf))
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Couldn't make HTTP request with PresignedPutObject URL", err)
+ return
+ }
+ httpClient := &http.Client{
+ // Setting a sensible time out of 30secs to wait for response
+ // headers. Request is pro-actively cancelled after 30secs
+ // with no response.
+ Timeout: 30 * time.Second,
+ Transport: http.DefaultTransport,
+ }
+ resp, err = httpClient.Do(req)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedPutObject failed", err)
+ return
+ }
+
+ newReader, err = c.GetObject(bucketName, objectName+"-presigned", minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject after PresignedPutObject failed", err)
+ return
+ }
+
+ newReadBytes, err = ioutil.ReadAll(newReader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll after GetObject failed", err)
+ return
+ }
+
+ if !bytes.Equal(newReadBytes, buf) {
+ logError(testName, function, args, startTime, "", "Bytes mismatch", err)
+ return
+ }
+
+ function = "RemoveObject(bucketName, objectName)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ }
+ err = c.RemoveObject(bucketName, objectName)
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "RemoveObject failed", err)
+ return
+ }
+ args["objectName"] = objectName + "-f"
+ err = c.RemoveObject(bucketName, objectName+"-f")
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "RemoveObject failed", err)
+ return
+ }
+
+ args["objectName"] = objectName + "-nolength"
+ err = c.RemoveObject(bucketName, objectName+"-nolength")
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "RemoveObject failed", err)
+ return
+ }
+
+ args["objectName"] = objectName + "-presigned"
+ err = c.RemoveObject(bucketName, objectName+"-presigned")
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "RemoveObject failed", err)
+ return
+ }
+
+ function = "RemoveBucket(bucketName)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ }
+ err = c.RemoveBucket(bucketName)
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "RemoveBucket failed", err)
+ return
+ }
+ err = c.RemoveBucket(bucketName)
+ if err == nil {
+ logError(testName, function, args, startTime, "", "RemoveBucket did not fail for invalid bucket name", err)
+ return
+ }
+ if err.Error() != "The specified bucket does not exist" {
+ logError(testName, function, args, startTime, "", "RemoveBucket failed", err)
+ return
+ }
+
+ if err = os.Remove(fileName); err != nil {
+ logError(testName, function, args, startTime, "", "File Remove failed", err)
+ return
+ }
+ if err = os.Remove(fileName + "-f"); err != nil {
+ logError(testName, function, args, startTime, "", "File Remove failed", err)
+ return
+ }
+ successLogger(testName, function_all, args, startTime).Info()
+}
+
+// Test for validating GetObject Reader* methods functioning when the
+// object is modified in the object store.
+func testGetObjectModified() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObject(bucketName, objectName)"
+ args := map[string]interface{}{}
+
+ // Instantiate new minio client object.
+ c, err := minio.NewV4(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Make a new bucket.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+ defer c.RemoveBucket(bucketName)
+
+ // Upload an object.
+ objectName := "myobject"
+ args["objectName"] = objectName
+ content := "helloworld"
+ _, err = c.PutObject(bucketName, objectName, strings.NewReader(content), int64(len(content)), minio.PutObjectOptions{ContentType: "application/text"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err)
+ return
+ }
+
+ defer c.RemoveObject(bucketName, objectName)
+
+ reader, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Failed to GetObject "+objectName+", from bucket "+bucketName, err)
+ return
+ }
+ defer reader.Close()
+
+ // Read a few bytes of the object.
+ b := make([]byte, 5)
+ n, err := reader.ReadAt(b, 0)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Failed to read object "+objectName+", from bucket "+bucketName+" at an offset", err)
+ return
+ }
+
+ // Upload different contents to the same object while object is being read.
+ newContent := "goodbyeworld"
+ _, err = c.PutObject(bucketName, objectName, strings.NewReader(newContent), int64(len(newContent)), minio.PutObjectOptions{ContentType: "application/text"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Failed to upload "+objectName+", to bucket "+bucketName, err)
+ return
+ }
+
+ // Confirm that a Stat() call in between doesn't change the Object's cached etag.
+ _, err = reader.Stat()
+ expectedError := "At least one of the pre-conditions you specified did not hold"
+ if err.Error() != expectedError {
+ logError(testName, function, args, startTime, "", "Expected Stat to fail with error "+expectedError+", but received "+err.Error(), err)
+ return
+ }
+
+ // Read again only to find object contents have been modified since last read.
+ _, err = reader.ReadAt(b, int64(n))
+ if err.Error() != expectedError {
+ logError(testName, function, args, startTime, "", "Expected ReadAt to fail with error "+expectedError+", but received "+err.Error(), err)
+ return
+ }
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
+// Test validates putObject to upload a file seeked at a given offset.
+func testPutObjectUploadSeekedObject() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutObject(bucketName, objectName, fileToUpload, contentType)"
+ args := map[string]interface{}{
+ "bucketName": "",
+ "objectName": "",
+ "fileToUpload": "",
+ "contentType": "binary/octet-stream",
+ }
+
+ // Instantiate new minio client object.
+ c, err := minio.NewV4(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Make a new bucket.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+ defer c.RemoveBucket(bucketName)
+
+ var tempfile *os.File
+
+ if fileName := getMintDataDirFilePath("datafile-100-kB"); fileName != "" {
+ tempfile, err = os.Open(fileName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "File open failed", err)
+ return
+ }
+ args["fileToUpload"] = fileName
+ } else {
+ tempfile, err = ioutil.TempFile("", "minio-go-upload-test-")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "TempFile create failed", err)
+ return
+ }
+ args["fileToUpload"] = tempfile.Name()
+
+ // Generate 100kB data
+ if _, err = io.Copy(tempfile, getDataReader("datafile-100-kB")); err != nil {
+ logError(testName, function, args, startTime, "", "File copy failed", err)
+ return
+ }
+
+ defer os.Remove(tempfile.Name())
+
+ // Seek back to the beginning of the file.
+ tempfile.Seek(0, 0)
+ }
+ var length = 100 * humanize.KiByte
+ objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
+ args["objectName"] = objectName
+
+ offset := length / 2
+ if _, err = tempfile.Seek(int64(offset), 0); err != nil {
+ logError(testName, function, args, startTime, "", "TempFile seek failed", err)
+ return
+ }
+
+ n, err := c.PutObject(bucketName, objectName, tempfile, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ if n != int64(length-offset) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid length returned, expected %d got %d", int64(length-offset), n), err)
+ return
+ }
+ tempfile.Close()
+
+ obj, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ n, err = obj.Seek(int64(offset), 0)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Seek failed", err)
+ return
+ }
+ if n != int64(offset) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid offset returned, expected %d got %d", int64(offset), n), err)
+ return
+ }
+
+ n, err = c.PutObject(bucketName, objectName+"getobject", obj, int64(length-offset), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ if n != int64(length-offset) {
+ logError(testName, function, args, startTime, "", fmt.Sprintf("Invalid offset returned, expected %d got %d", int64(length-offset), n), err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+}
+
+// Tests bucket re-create errors.
+//
+// Creates a bucket in 'eu-west-1', attempts to create the same bucket again,
+// and verifies the server rejects it with BucketAlreadyExists or
+// BucketAlreadyOwnedByYou. Only meaningful against AWS S3, so other
+// endpoints are skipped.
+func testMakeBucketErrorV2() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "MakeBucket(bucketName, region)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"region":     "eu-west-1",
+	}
+
+	// Region-specific error behavior is only defined for AWS S3.
+	if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
+		ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non s3 runs").Info()
+		return
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.NewV2(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	region := "eu-west-1"
+	args["bucketName"] = bucketName
+	args["region"] = region
+
+	// Make a new bucket in 'eu-west-1'.
+	if err = c.MakeBucket(bucketName, region); err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+	// Re-creating the same bucket must fail.
+	if err = c.MakeBucket(bucketName, region); err == nil {
+		logError(testName, function, args, startTime, "", "MakeBucket did not fail for existing bucket name", err)
+		return
+	}
+	// Verify valid error response from server.
+	if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" &&
+		minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" {
+		logError(testName, function, args, startTime, "", "Invalid error returned by server", err)
+		// Bail out like every other failed check in this file; previously the
+		// missing return meant the test logged a failure AND a success.
+		return
+	}
+	// Delete all objects and buckets
+	if err = cleanupBucket(bucketName, c); err != nil {
+		logError(testName, function, args, startTime, "", "Cleanup failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test get object reader to not throw error on being closed twice.
+//
+// Uploads a 33 KiB object, reads it back, and verifies that the second
+// Close() on the object reader returns an error while the first succeeds.
+func testGetObjectClosedTwiceV2() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "MakeBucket(bucketName, region)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"region":     "eu-west-1",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.NewV2(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	// Generate 33K of data.
+	bufSize := dataFileMap["datafile-33-kB"]
+	var reader = getDataReader("datafile-33-kB")
+	defer reader.Close()
+
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	if n != int64(bufSize) {
+		// string(int) converts to a rune, not digits — format with Sprintf.
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", bufSize, n), err)
+		return
+	}
+
+	// Read the data back
+	r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", bufSize, st.Size), err)
+		return
+	}
+	// First Close must succeed.
+	if err := r.Close(); err != nil {
+		// Previously mislabeled as "Stat failed".
+		logError(testName, function, args, startTime, "", "Close failed", err)
+		return
+	}
+	// Second Close must report that the object is already closed.
+	if err := r.Close(); err == nil {
+		logError(testName, function, args, startTime, "", "Object is already closed, should return error", err)
+		return
+	}
+
+	// Delete all objects and buckets
+	if err = cleanupBucket(bucketName, c); err != nil {
+		logError(testName, function, args, startTime, "", "Cleanup failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Tests removing partially uploaded objects.
+//
+// Streams data into PutObject through a pipe, fails the upload on purpose by
+// closing the pipe with a sentinel error, and then verifies that
+// RemoveIncompleteUpload cleans up the partial upload.
+func testRemovePartiallyUploadedV2() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "RemoveIncompleteUpload(bucketName, objectName)"
+	args := map[string]interface{}{}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.NewV2(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
+		return
+	}
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Enable tracing, write to stdout.
+	// c.TraceOn(os.Stderr)
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	r := bytes.NewReader(bytes.Repeat([]byte("a"), 128*1024))
+
+	reader, writer := io.Pipe()
+	go func() {
+		// Feed 25 * 128 KiB through the pipe, then fail the upload on purpose.
+		for i := 0; i < 25; i++ {
+			_, cerr := io.CopyN(writer, r, 128*1024)
+			if cerr != nil {
+				logError(testName, function, args, startTime, "", "Copy failed", cerr)
+				// Propagate the error through the pipe; returning without
+				// closing the writer would leave PutObject below blocked
+				// forever waiting for more data.
+				writer.CloseWithError(cerr)
+				return
+			}
+			r.Seek(0, 0)
+		}
+		writer.CloseWithError(errors.New("proactively closed to be verified later"))
+	}()
+
+	objectName := bucketName + "-resumable"
+	args["objectName"] = objectName
+
+	// The upload must surface the sentinel error injected above.
+	_, err = c.PutObject(bucketName, objectName, reader, -1, minio.PutObjectOptions{ContentType: "application/octet-stream"})
+	if err == nil {
+		logError(testName, function, args, startTime, "", "PutObject should fail", err)
+		return
+	}
+	if err.Error() != "proactively closed to be verified later" {
+		logError(testName, function, args, startTime, "", "Unexpected error, expected : proactively closed to be verified later", err)
+		return
+	}
+	err = c.RemoveIncompleteUpload(bucketName, objectName)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "RemoveIncompleteUpload failed", err)
+		return
+	}
+	// Delete all objects and buckets
+	if err = cleanupBucket(bucketName, c); err != nil {
+		logError(testName, function, args, startTime, "", "Cleanup failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Tests FPutObject hidden contentType setting
+//
+// Uploads an 11 MiB temp file three ways: with an explicit content type,
+// with none (expects application/octet-stream), and with a ".gtar"
+// extension (expects application/x-gtar inferred from the file name).
+func testFPutObjectV2() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "FPutObject(bucketName, objectName, fileName, opts)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": "",
+		"fileName":   "",
+		"opts":       "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.NewV2(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	// Make a temp file with 11*1024*1024 bytes of data.
+	file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
+	if err != nil {
+		logError(testName, function, args, startTime, "", "TempFile creation failed", err)
+		return
+	}
+
+	r := bytes.NewReader(bytes.Repeat([]byte("b"), 11*1024*1024))
+	n, err := io.CopyN(file, r, 11*1024*1024)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Copy failed", err)
+		return
+	}
+	if n != int64(11*1024*1024) {
+		// string(int64) converts to a rune, not digits — format with Sprintf.
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", 11*1024*1024, n), err)
+		return
+	}
+
+	// Close the file pro-actively for windows.
+	err = file.Close()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "File close failed", err)
+		return
+	}
+
+	// Set base object name
+	objectName := bucketName + "FPutObject"
+	args["objectName"] = objectName
+	args["fileName"] = file.Name()
+
+	// Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
+	n, err = c.FPutObject(bucketName, objectName+"-standard", file.Name(), minio.PutObjectOptions{ContentType: "application/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "FPutObject failed", err)
+		return
+	}
+	if n != int64(11*1024*1024) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", 11*1024*1024, n), err)
+		return
+	}
+
+	// Perform FPutObject with no contentType provided (Expecting application/octet-stream)
+	args["objectName"] = objectName + "-Octet"
+	args["contentType"] = ""
+
+	n, err = c.FPutObject(bucketName, objectName+"-Octet", file.Name(), minio.PutObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "FPutObject failed", err)
+		return
+	}
+	if n != int64(11*1024*1024) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", 11*1024*1024, n), err)
+		return
+	}
+
+	// Add extension to temp file name
+	fileName := file.Name()
+	err = os.Rename(file.Name(), fileName+".gtar")
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Rename failed", err)
+		return
+	}
+
+	// Perform FPutObject with no contentType provided (Expecting application/x-gtar)
+	args["objectName"] = objectName + "-Octet"
+	args["contentType"] = ""
+	args["fileName"] = fileName + ".gtar"
+
+	n, err = c.FPutObject(bucketName, objectName+"-GTar", fileName+".gtar", minio.PutObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "FPutObject failed", err)
+		return
+	}
+	if n != int64(11*1024*1024) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", 11*1024*1024, n), err)
+		return
+	}
+
+	// Check headers
+	rStandard, err := c.StatObject(bucketName, objectName+"-standard", minio.StatObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "StatObject failed", err)
+		return
+	}
+	if rStandard.ContentType != "application/octet-stream" {
+		logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rStandard.ContentType, err)
+		return
+	}
+
+	rOctet, err := c.StatObject(bucketName, objectName+"-Octet", minio.StatObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "StatObject failed", err)
+		return
+	}
+	if rOctet.ContentType != "application/octet-stream" {
+		logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/octet-stream , got "+rOctet.ContentType, err)
+		return
+	}
+
+	rGTar, err := c.StatObject(bucketName, objectName+"-GTar", minio.StatObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "StatObject failed", err)
+		return
+	}
+	if rGTar.ContentType != "application/x-gtar" {
+		logError(testName, function, args, startTime, "", "Content-Type headers mismatched, expected: application/x-gtar , got "+rGTar.ContentType, err)
+		return
+	}
+
+	// Delete all objects and buckets
+	if err = cleanupBucket(bucketName, c); err != nil {
+		logError(testName, function, args, startTime, "", "Cleanup failed", err)
+		return
+	}
+
+	err = os.Remove(fileName + ".gtar")
+	if err != nil {
+		logError(testName, function, args, startTime, "", "File remove failed", err)
+		return
+	}
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Tests various bucket supported formats.
+//
+// Creates a bucket in 'eu-west-1', then a bucket whose name contains a '.'
+// in 'us-west-2' (which the client stages as path-style instead of
+// virtual-host style). Only meaningful against AWS S3, so other endpoints
+// are skipped.
+func testMakeBucketRegionsV2() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "MakeBucket(bucketName, region)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"region":     "eu-west-1",
+	}
+
+	// Region-specific behavior is only defined for AWS S3.
+	if os.Getenv(serverEndpoint) != "s3.amazonaws.com" {
+		ignoredLog(testName, function, args, startTime, "Skipped region functional tests for non s3 runs").Info()
+		return
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.NewV2(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket in 'eu-west-1'.
+	if err = c.MakeBucket(bucketName, "eu-west-1"); err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	if err = cleanupBucket(bucketName, c); err != nil {
+		logError(testName, function, args, startTime, "", "Cleanup failed", err)
+		return
+	}
+
+	// Make a new bucket with '.' in its name, in 'us-west-2'. This
+	// request is internally staged into a path style instead of
+	// virtual host style.
+	if err = c.MakeBucket(bucketName+".withperiod", "us-west-2"); err != nil {
+		args["bucketName"] = bucketName + ".withperiod"
+		args["region"] = "us-west-2"
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	// Delete all objects and buckets
+	if err = cleanupBucket(bucketName+".withperiod", c); err != nil {
+		logError(testName, function, args, startTime, "", "Cleanup failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Tests get object ReaderSeeker interface methods.
+//
+// Uploads a 33 KiB object and exercises Seek with all three whence values
+// (including an invalid positive offset from the end), verifying both the
+// returned positions and the bytes subsequently read.
+func testGetObjectReadSeekFunctionalV2() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "GetObject(bucketName, objectName)"
+	args := map[string]interface{}{}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.NewV2(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	// Generate 33K of data.
+	bufSize := dataFileMap["datafile-33-kB"]
+	var reader = getDataReader("datafile-33-kB")
+	defer reader.Close()
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	// Keep a local copy for byte-level comparison after seeks.
+	buf, err := ioutil.ReadAll(reader)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAll failed", err)
+		return
+	}
+
+	// Save the data.
+	n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	if n != int64(bufSize) {
+		// string(int) converts to a rune, not digits — format with Sprintf.
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", bufSize, n), err)
+		return
+	}
+
+	// Read the data back
+	r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes in stat does not match, expected %d got %d", bufSize, st.Size), err)
+		return
+	}
+
+	// Seek from the start (whence 0).
+	offset := int64(2048)
+	n, err = r.Seek(offset, 0)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Seek failed", err)
+		return
+	}
+	if n != offset {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of seeked bytes does not match, expected %d got %d", offset, n), err)
+		return
+	}
+	// Seek zero from current (whence 1) — position must be unchanged.
+	n, err = r.Seek(0, 1)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Seek failed", err)
+		return
+	}
+	if n != offset {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of seeked bytes does not match, expected %d got %d", offset, n), err)
+		return
+	}
+	// Positive offset from the end (whence 2) must error out.
+	_, err = r.Seek(offset, 2)
+	if err == nil {
+		logError(testName, function, args, startTime, "", "Seek on positive offset for whence '2' should error out", err)
+		return
+	}
+	// Negative offset from the end (whence 2) is valid.
+	n, err = r.Seek(-offset, 2)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Seek failed", err)
+		return
+	}
+	if n != st.Size-offset {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of seeked bytes does not match, expected %d got %d", st.Size-offset, n), err)
+		return
+	}
+
+	var buffer1 bytes.Buffer
+	if _, err = io.CopyN(&buffer1, r, st.Size); err != nil {
+		if err != io.EOF {
+			logError(testName, function, args, startTime, "", "Copy failed", err)
+			return
+		}
+	}
+	if !bytes.Equal(buf[len(buf)-int(offset):], buffer1.Bytes()) {
+		logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
+		return
+	}
+
+	// Seek again and read again.
+	n, err = r.Seek(offset-1, 0)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Seek failed", err)
+		return
+	}
+	if n != (offset - 1) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of seeked bytes does not match, expected %d got %d", offset-1, n), err)
+		return
+	}
+
+	var buffer2 bytes.Buffer
+	if _, err = io.CopyN(&buffer2, r, st.Size); err != nil {
+		if err != io.EOF {
+			logError(testName, function, args, startTime, "", "Copy failed", err)
+			return
+		}
+	}
+	// Verify now lesser bytes.
+	if !bytes.Equal(buf[2047:], buffer2.Bytes()) {
+		logError(testName, function, args, startTime, "", "Incorrect read bytes v/s original buffer", err)
+		return
+	}
+
+	// Delete all objects and buckets
+	if err = cleanupBucket(bucketName, c); err != nil {
+		logError(testName, function, args, startTime, "", "Cleanup failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Tests get object ReaderAt interface methods.
+//
+// Uploads a 33 KiB object and issues several ReadAt calls at successive
+// offsets, the whole object, and one byte past the end (expects io.EOF),
+// comparing every read against the locally retained buffer.
+func testGetObjectReadAtFunctionalV2() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "GetObject(bucketName, objectName)"
+	args := map[string]interface{}{}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.NewV2(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	// Generate 33K of data.
+	bufSize := dataFileMap["datafile-33-kB"]
+	var reader = getDataReader("datafile-33-kB")
+	defer reader.Close()
+
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	args["objectName"] = objectName
+
+	// Keep a local copy for byte-level comparison of each ReadAt.
+	buf, err := ioutil.ReadAll(reader)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAll failed", err)
+		return
+	}
+
+	// Save the data
+	n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	if n != int64(bufSize) {
+		// string(int) converts to a rune, not digits — format with Sprintf.
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", bufSize, n), err)
+		return
+	}
+
+	// Read the data back
+	r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+
+	st, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+
+	if st.Size != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", bufSize, st.Size), err)
+		return
+	}
+
+	offset := int64(2048)
+
+	// Read directly
+	buf2 := make([]byte, 512)
+	buf3 := make([]byte, 512)
+	buf4 := make([]byte, 512)
+
+	m, err := r.ReadAt(buf2, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf2) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d got %d", len(buf2), m), err)
+		return
+	}
+	if !bytes.Equal(buf2, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+		return
+	}
+	offset += 512
+	m, err = r.ReadAt(buf3, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf3) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d got %d", len(buf3), m), err)
+		return
+	}
+	if !bytes.Equal(buf3, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+		return
+	}
+	offset += 512
+	m, err = r.ReadAt(buf4, offset)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAt failed", err)
+		return
+	}
+	if m != len(buf4) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d got %d", len(buf4), m), err)
+		return
+	}
+	if !bytes.Equal(buf4, buf[offset:offset+512]) {
+		logError(testName, function, args, startTime, "", "Incorrect read between two ReadAt from same offset", err)
+		return
+	}
+
+	buf5 := make([]byte, n)
+	// Read the whole object.
+	m, err = r.ReadAt(buf5, 0)
+	if err != nil {
+		// EOF at exactly the object size is acceptable.
+		if err != io.EOF {
+			logError(testName, function, args, startTime, "", "ReadAt failed", err)
+			return
+		}
+	}
+	if m != len(buf5) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("ReadAt read shorter bytes before reaching EOF, expected %d got %d", len(buf5), m), err)
+		return
+	}
+	if !bytes.Equal(buf, buf5) {
+		logError(testName, function, args, startTime, "", "Incorrect data read in GetObject, than what was previously uploaded", err)
+		return
+	}
+
+	buf6 := make([]byte, n+1)
+	// Read the whole object and beyond.
+	_, err = r.ReadAt(buf6, 0)
+	if err != nil {
+		if err != io.EOF {
+			logError(testName, function, args, startTime, "", "ReadAt failed", err)
+			return
+		}
+	}
+	// Delete all objects and buckets
+	if err = cleanupBucket(bucketName, c); err != nil {
+		logError(testName, function, args, startTime, "", "Cleanup failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Tests copy object
+//
+// Copies an object between two buckets with valid copy conditions
+// (modified-since, ETag match), verifies sizes match, then re-copies with
+// deliberately failing conditions and expects an error.
+func testCopyObjectV2() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "CopyObject(destination, source)"
+	args := map[string]interface{}{}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object
+	c, err := minio.NewV2(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	// Record for log output, consistent with the other tests in this file.
+	args["bucketName"] = bucketName
+
+	// Make a new bucket in 'us-east-1' (source bucket).
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	// Make a new bucket in 'us-east-1' (destination bucket).
+	err = c.MakeBucket(bucketName+"-copy", "us-east-1")
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	// Generate 33K of data.
+	bufSize := dataFileMap["datafile-33-kB"]
+	var reader = getDataReader("datafile-33-kB")
+	defer reader.Close()
+
+	// Save the data
+	objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+	n, err := c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	if n != int64(bufSize) {
+		// string(int) converts to a rune, not digits — format with Sprintf.
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", bufSize, n), err)
+		return
+	}
+
+	r, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+	// Check the various fields of source object against destination object.
+	objInfo, err := r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+
+	// Copy Source
+	src := minio.NewSourceInfo(bucketName, objectName, nil)
+	args["source"] = src
+
+	// Set copy conditions.
+
+	// All invalid conditions first.
+	err = src.SetModifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
+	if err == nil {
+		logError(testName, function, args, startTime, "", "SetModifiedSinceCond did not fail for invalid conditions", err)
+		return
+	}
+	err = src.SetUnmodifiedSinceCond(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC))
+	if err == nil {
+		logError(testName, function, args, startTime, "", "SetUnmodifiedSinceCond did not fail for invalid conditions", err)
+		return
+	}
+	err = src.SetMatchETagCond("")
+	if err == nil {
+		logError(testName, function, args, startTime, "", "SetMatchETagCond did not fail for invalid conditions", err)
+		return
+	}
+	err = src.SetMatchETagExceptCond("")
+	if err == nil {
+		logError(testName, function, args, startTime, "", "SetMatchETagExceptCond did not fail for invalid conditions", err)
+		return
+	}
+
+	err = src.SetModifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+	if err != nil {
+		logError(testName, function, args, startTime, "", "SetModifiedSinceCond failed", err)
+		return
+	}
+	err = src.SetMatchETagCond(objInfo.ETag)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "SetMatchETagCond failed", err)
+		return
+	}
+
+	dst, err := minio.NewDestinationInfo(bucketName+"-copy", objectName+"-copy", nil, nil)
+	args["destination"] = dst
+	if err != nil {
+		logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
+		return
+	}
+
+	// Perform the Copy
+	err = c.CopyObject(dst, src)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "CopyObject failed", err)
+		return
+	}
+
+	// Source object
+	r, err = c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+	// Destination object
+	readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy", minio.GetObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+	// Check the various fields of source object against destination object.
+	objInfo, err = r.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+	objInfoCopy, err := readerCopy.Stat()
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Stat failed", err)
+		return
+	}
+	if objInfo.Size != objInfoCopy.Size {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Number of bytes does not match, expected %d got %d", objInfoCopy.Size, objInfo.Size), err)
+		return
+	}
+
+	// CopyObject again but with wrong conditions
+	src = minio.NewSourceInfo(bucketName, objectName, nil)
+	err = src.SetUnmodifiedSinceCond(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
+	if err != nil {
+		logError(testName, function, args, startTime, "", "SetUnmodifiedSinceCond failed", err)
+		return
+	}
+	err = src.SetMatchETagExceptCond(objInfo.ETag)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "SetMatchETagExceptCond failed", err)
+		return
+	}
+
+	// Perform the Copy which should fail
+	err = c.CopyObject(dst, src)
+	if err == nil {
+		logError(testName, function, args, startTime, "", "CopyObject did not fail for invalid conditions", err)
+		return
+	}
+
+	// Delete all objects and buckets
+	if err = cleanupBucket(bucketName, c); err != nil {
+		logError(testName, function, args, startTime, "", "Cleanup failed", err)
+		return
+	}
+	if err = cleanupBucket(bucketName+"-copy", c); err != nil {
+		logError(testName, function, args, startTime, "", "Cleanup failed", err)
+		return
+	}
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// testComposeObjectErrorCasesWrapper exercises ComposeObject error paths with
+// the given client: (a) more than 10,000 sources must be rejected, and (b) a
+// source whose copy range extends past the object's end must be rejected.
+// Results are reported through logError/successLogger rather than returned.
+func testComposeObjectErrorCasesWrapper(c *minio.Client) {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "ComposeObject(destination, sourceList)"
+	args := map[string]interface{}{}
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+
+	// Make a new bucket in 'us-east-1' (source bucket).
+	err := c.MakeBucket(bucketName, "us-east-1")
+
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	// Test that more than 10K source objects cannot be
+	// concatenated.
+	srcArr := [10001]minio.SourceInfo{}
+	srcSlice := srcArr[:]
+	dst, err := minio.NewDestinationInfo(bucketName, "object", nil, nil)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
+		return
+	}
+
+	args["destination"] = dst
+	// Just explain about srcArr in args["sourceList"]
+	// to stop having 10,001 null headers logged
+	args["sourceList"] = "source array of 10,001 elements"
+	// The exact error string (including the "as least" typo) comes from the
+	// client library and must be matched verbatim.
+	if err := c.ComposeObject(dst, srcSlice); err == nil {
+		logError(testName, function, args, startTime, "", "Expected error in ComposeObject", err)
+		return
+	} else if err.Error() != "There must be as least one and up to 10000 source objects." {
+		logError(testName, function, args, startTime, "", "Got unexpected error", err)
+		return
+	}
+
+	// Create a source with invalid offset spec and check that
+	// error is returned:
+	// 1. Create the source object.
+	const badSrcSize = 5 * 1024 * 1024
+	buf := bytes.Repeat([]byte("1"), badSrcSize)
+	_, err = c.PutObject(bucketName, "badObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+	// 2. Set invalid range spec on the object (going beyond
+	// object size)
+	badSrc := minio.NewSourceInfo(bucketName, "badObject", nil)
+	err = badSrc.SetRange(1, badSrcSize)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Setting NewSourceInfo failed", err)
+		return
+	}
+	// 3. ComposeObject call should fail.
+	if err := c.ComposeObject(dst, []minio.SourceInfo{badSrc}); err == nil {
+		logError(testName, function, args, startTime, "", "ComposeObject expected to fail", err)
+		return
+	} else if !strings.Contains(err.Error(), "has invalid segment-to-copy") {
+		logError(testName, function, args, startTime, "", "Got invalid error", err)
+		return
+	}
+
+	// Delete all objects and buckets
+	if err = cleanupBucket(bucketName, c); err != nil {
+		logError(testName, function, args, startTime, "", "Cleanup failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test ComposeObject error cases against a signature-v2 client.
+func testComposeObjectErrorCasesV2() {
+	// Logging parameters for this test run.
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "ComposeObject(destination, sourceList)"
+	args := map[string]interface{}{}
+
+	// Build a signature-v2 client from the environment configuration.
+	client, err := minio.NewV2(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
+		return
+	}
+
+	testComposeObjectErrorCasesWrapper(client)
+}
+
+// testComposeMultipleSources uploads one 5MiB source object, concatenates ten
+// copies of it (the tenth restricted to a single byte via SetRange(0, 0)) into
+// a destination object, and verifies the composed object's size.
+func testComposeMultipleSources(c *minio.Client) {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "ComposeObject(destination, sourceList)"
+	args := map[string]interface{}{
+		"destination": "",
+		"sourceList":  "",
+	}
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	// Make a new bucket in 'us-east-1' (source bucket).
+	err := c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	// Upload a small source object
+	const srcSize = 1024 * 1024 * 5
+	buf := bytes.Repeat([]byte("1"), srcSize)
+	_, err = c.PutObject(bucketName, "srcObject", bytes.NewReader(buf), int64(srcSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject failed", err)
+		return
+	}
+
+	// We will append 10 copies of the object.
+	srcs := []minio.SourceInfo{}
+	for i := 0; i < 10; i++ {
+		srcs = append(srcs, minio.NewSourceInfo(bucketName, "srcObject", nil))
+	}
+	// make the last part very small
+	err = srcs[9].SetRange(0, 0)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "SetRange failed", err)
+		return
+	}
+	args["sourceList"] = srcs
+
+	dst, err := minio.NewDestinationInfo(bucketName, "dstObject", nil, nil)
+	args["destination"] = dst
+
+	if err != nil {
+		logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
+		return
+	}
+	err = c.ComposeObject(dst, srcs)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ComposeObject failed", err)
+		return
+	}
+
+	objProps, err := c.StatObject(bucketName, "dstObject", minio.StatObjectOptions{})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "StatObject failed", err)
+		return
+	}
+
+	// Expect nine full copies plus the single byte from the tenth source.
+	// NOTE: the previous message used string(int), which converts the integer
+	// to a rune (garbage in logs) and quoted the wrong expected value
+	// (10000*srcSize); format with %d and the value actually checked.
+	if objProps.Size != 9*srcSize+1 {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Size mismatched! Expected %d got %d", 9*srcSize+1, objProps.Size), err)
+		return
+	}
+	// Delete all objects and buckets
+	if err = cleanupBucket(bucketName, c); err != nil {
+		logError(testName, function, args, startTime, "", "Cleanup failed", err)
+		return
+	}
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test concatenating multiple source objects with a signature-v2 client.
+func testCompose10KSourcesV2() {
+	// Logging parameters for this test run.
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "ComposeObject(destination, sourceList)"
+	args := map[string]interface{}{}
+
+	// Build a signature-v2 client from the environment configuration.
+	client, err := minio.NewV2(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
+		return
+	}
+
+	testComposeMultipleSources(client)
+}
+
+// testEncryptedCopyObjectWrapper uploads an SSE-C encrypted object, copies it
+// server-side while rotating the encryption key (key1 -> key2), downloads the
+// copy with key2 and verifies the decrypted bytes match the original buffer.
+func testEncryptedCopyObjectWrapper(c *minio.Client) {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "CopyObject(destination, source)"
+	args := map[string]interface{}{}
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	// Make a new bucket in 'us-east-1' (source bucket).
+	err := c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	// Two distinct 32-byte SSE-C keys: key1 encrypts the source, key2 the copy.
+	key1 := minio.NewSSEInfo([]byte("32byteslongsecretkeymustbegiven1"), "AES256")
+	key2 := minio.NewSSEInfo([]byte("32byteslongsecretkeymustbegiven2"), "AES256")
+
+	// 1. create an sse-c encrypted object to copy by uploading
+	const srcSize = 1024 * 1024
+	buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB
+	metadata := make(map[string]string)
+	for k, v := range key1.GetSSEHeaders() {
+		metadata[k] = v
+	}
+	_, err = c.PutObject(bucketName, "srcObject", bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{UserMetadata: metadata, Progress: nil})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObject call failed", err)
+		return
+	}
+
+	// 2. copy object and change encryption key
+	src := minio.NewSourceInfo(bucketName, "srcObject", &key1)
+	args["source"] = src
+	dst, err := minio.NewDestinationInfo(bucketName, "dstObject", &key2, nil)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
+		return
+	}
+	args["destination"] = dst
+
+	err = c.CopyObject(dst, src)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "CopyObject failed", err)
+		return
+	}
+
+	// 3. get copied object and check if content is equal
+	opts := minio.GetObjectOptions{}
+	for k, v := range key2.GetSSEHeaders() {
+		opts.Set(k, v)
+	}
+	// Use a keyed composite literal: minio.Core embeds *minio.Client, and the
+	// unkeyed form minio.Core{c} is flagged by `go vet` (composites check).
+	coreClient := minio.Core{Client: c}
+	reader, _, err := coreClient.GetObject(bucketName, "dstObject", opts)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "GetObject failed", err)
+		return
+	}
+	defer reader.Close()
+
+	decBytes, err := ioutil.ReadAll(reader)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ReadAll failed", err)
+		return
+	}
+	if !bytes.Equal(decBytes, buf) {
+		logError(testName, function, args, startTime, "", "Downloaded object mismatched for encrypted object", err)
+		return
+	}
+	// Delete all objects and buckets
+	if err = cleanupBucket(bucketName, c); err != nil {
+		logError(testName, function, args, startTime, "", "Cleanup failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test encrypted copy object with a signature-v4 client.
+func testEncryptedCopyObject() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "CopyObject(destination, source)"
+	args := map[string]interface{}{}
+
+	// Instantiate new minio client object
+	c, err := minio.NewV4(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		// This is a v4 client; the old message wrongly said "v2". Use the
+		// same wording as the other NewV4 call sites in this file.
+		logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+		return
+	}
+
+	// c.TraceOn(os.Stderr)
+	testEncryptedCopyObjectWrapper(c)
+}
+
+// Test encrypted copy object with a signature-v2 client.
+func testEncryptedCopyObjectV2() {
+	// Logging parameters for this test run.
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "CopyObject(destination, source)"
+	args := map[string]interface{}{}
+
+	// Build a signature-v2 client from the environment configuration.
+	client, err := minio.NewV2(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Minio v2 client object creation failed", err)
+		return
+	}
+
+	testEncryptedCopyObjectWrapper(client)
+}
+
+// Test user-metadata handling of CopyObject/ComposeObject with a v4 client.
+func testUserMetadataCopying() {
+	// Logging parameters for this test run.
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "CopyObject(destination, source)"
+	args := map[string]interface{}{}
+
+	// Build a signature-v4 client from the environment configuration.
+	client, err := minio.NewV4(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+		return
+	}
+
+	// Uncomment to enable request tracing:
+	// client.TraceOn(os.Stderr)
+	testUserMetadataCopyingWrapper(client)
+}
+
+// testUserMetadataCopyingWrapper verifies how x-amz-meta-* user metadata is
+// propagated by CopyObject and ComposeObject:
+//  1. CopyObject with destination metadata set replaces the source metadata;
+//  2. CopyObject with no destination metadata copies the source metadata;
+//  3. ComposeObject with no destination metadata copies no metadata;
+//  4. ComposeObject with destination metadata set uses exactly that metadata.
+// Results are reported through logError/successLogger rather than returned.
+func testUserMetadataCopyingWrapper(c *minio.Client) {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "CopyObject(destination, source)"
+	args := map[string]interface{}{}
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	// Make a new bucket in 'us-east-1' (source bucket).
+	err := c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	// fetchMeta stats the named object and returns only its
+	// x-amz-meta-* headers (case-insensitive prefix match).
+	fetchMeta := func(object string) (h http.Header) {
+		objInfo, err := c.StatObject(bucketName, object, minio.StatObjectOptions{})
+		if err != nil {
+			logError(testName, function, args, startTime, "", "Stat failed", err)
+			return
+		}
+		h = make(http.Header)
+		for k, vs := range objInfo.Metadata {
+			if strings.HasPrefix(strings.ToLower(k), "x-amz-meta-") {
+				for _, v := range vs {
+					h.Add(k, v)
+				}
+			}
+		}
+		return h
+	}
+
+	// 1. create a client encrypted object to copy by uploading
+	const srcSize = 1024 * 1024
+	buf := bytes.Repeat([]byte("abcde"), srcSize) // gives a buffer of 5MiB
+	// `metadata` (http.Header form) is the expected result for DeepEqual
+	// comparisons below; `m` carries the same pair in the map form that
+	// PutObjectOptions.UserMetadata requires.
+	metadata := make(http.Header)
+	metadata.Set("x-amz-meta-myheader", "myvalue")
+	m := make(map[string]string)
+	m["x-amz-meta-myheader"] = "myvalue"
+	_, err = c.PutObject(bucketName, "srcObject",
+		bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{UserMetadata: m})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObjectWithMetadata failed", err)
+		return
+	}
+	if !reflect.DeepEqual(metadata, fetchMeta("srcObject")) {
+		logError(testName, function, args, startTime, "", "Metadata match failed", err)
+		return
+	}
+
+	// 2. create source
+	src := minio.NewSourceInfo(bucketName, "srcObject", nil)
+	// 2.1 create destination with metadata set
+	dst1, err := minio.NewDestinationInfo(bucketName, "dstObject-1", nil, map[string]string{"notmyheader": "notmyvalue"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
+		return
+	}
+
+	// 3. Check that copying to an object with metadata set resets
+	// the headers on the copy.
+	args["source"] = src
+	args["destination"] = dst1
+	err = c.CopyObject(dst1, src)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "CopyObject failed", err)
+		return
+	}
+
+	expectedHeaders := make(http.Header)
+	expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue")
+	if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-1")) {
+		logError(testName, function, args, startTime, "", "Metadata match failed", err)
+		return
+	}
+
+	// 4. create destination with no metadata set and same source
+	dst2, err := minio.NewDestinationInfo(bucketName, "dstObject-2", nil, nil)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
+		return
+	}
+	src = minio.NewSourceInfo(bucketName, "srcObject", nil)
+
+	// 5. Check that copying to an object with no metadata set,
+	// copies metadata.
+	args["source"] = src
+	args["destination"] = dst2
+	err = c.CopyObject(dst2, src)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "CopyObject failed", err)
+		return
+	}
+
+	// Reuses the source's header set built in step 1 as the expectation.
+	expectedHeaders = metadata
+	if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-2")) {
+		logError(testName, function, args, startTime, "", "Metadata match failed", err)
+		return
+	}
+
+	// 6. Compose a pair of sources.
+	srcs := []minio.SourceInfo{
+		minio.NewSourceInfo(bucketName, "srcObject", nil),
+		minio.NewSourceInfo(bucketName, "srcObject", nil),
+	}
+	dst3, err := minio.NewDestinationInfo(bucketName, "dstObject-3", nil, nil)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
+		return
+	}
+
+	function = "ComposeObject(destination, sources)"
+	args["source"] = srcs
+	args["destination"] = dst3
+	err = c.ComposeObject(dst3, srcs)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ComposeObject failed", err)
+		return
+	}
+
+	// Check that no headers are copied in this case
+	if !reflect.DeepEqual(make(http.Header), fetchMeta("dstObject-3")) {
+		logError(testName, function, args, startTime, "", "Metadata match failed", err)
+		return
+	}
+
+	// 7. Compose a pair of sources with dest user metadata set.
+	srcs = []minio.SourceInfo{
+		minio.NewSourceInfo(bucketName, "srcObject", nil),
+		minio.NewSourceInfo(bucketName, "srcObject", nil),
+	}
+	dst4, err := minio.NewDestinationInfo(bucketName, "dstObject-4", nil, map[string]string{"notmyheader": "notmyvalue"})
+	if err != nil {
+		logError(testName, function, args, startTime, "", "NewDestinationInfo failed", err)
+		return
+	}
+
+	function = "ComposeObject(destination, sources)"
+	args["source"] = srcs
+	args["destination"] = dst4
+	err = c.ComposeObject(dst4, srcs)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "ComposeObject failed", err)
+		return
+	}
+
+	// Check that no headers are copied in this case
+	expectedHeaders = make(http.Header)
+	expectedHeaders.Set("x-amz-meta-notmyheader", "notmyvalue")
+	if !reflect.DeepEqual(expectedHeaders, fetchMeta("dstObject-4")) {
+		logError(testName, function, args, startTime, "", "Metadata match failed", err)
+		return
+	}
+
+	// Delete all objects and buckets
+	if err = cleanupBucket(bucketName, c); err != nil {
+		logError(testName, function, args, startTime, "", "Cleanup failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test user-metadata handling of CopyObject/ComposeObject with a v2 client.
+func testUserMetadataCopyingV2() {
+	// Logging parameters for this test run.
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "CopyObject(destination, source)"
+	args := map[string]interface{}{}
+
+	// Build a signature-v2 client from the environment configuration.
+	client, err := minio.NewV2(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err)
+		return
+	}
+
+	// Uncomment to enable request tracing:
+	// client.TraceOn(os.Stderr)
+	testUserMetadataCopyingWrapper(client)
+}
+
+// Test put object with size -1 byte object.
+// Uploads a 65MB data file with an unknown (-1) length through a v2 client
+// and verifies the reported upload size.
+func testPutObjectNoLengthV2() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "PutObject(bucketName, objectName, reader, size, opts)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": "",
+		"size":       -1,
+		"opts":       "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.NewV2(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	objectName := bucketName + "unique"
+	args["objectName"] = objectName
+
+	bufSize := dataFileMap["datafile-65-MB"]
+	var reader = getDataReader("datafile-65-MB")
+	defer reader.Close()
+	args["size"] = bufSize
+
+	// Upload an object with unknown length (-1) to force streaming.
+	n, err := c.PutObject(bucketName, objectName, reader, -1, minio.PutObjectOptions{})
+
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err)
+		return
+	}
+	// NOTE: the previous message used string(int), which converts the integer
+	// to a rune (garbage in logs); format the sizes with %d instead.
+	if n != int64(bufSize) {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Expected upload object size %d got %d", bufSize, n), err)
+		return
+	}
+
+	// Delete all objects and buckets
+	if err = cleanupBucket(bucketName, c); err != nil {
+		logError(testName, function, args, startTime, "", "Cleanup failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test put objects of unknown size.
+// Streams several small pipe-fed uploads of unknown (-1) size through a v2
+// client and verifies each reported upload size.
+func testPutObjectsUnknownV2() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "PutObject(bucketName, objectName, reader,size,opts)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": "",
+		"size":       "",
+		"opts":       "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.NewV2(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	// Issues are revealed by trying to upload multiple files of unknown size
+	// sequentially (on 4GB machines)
+	for i := 1; i <= 4; i++ {
+		// Simulate that we could be receiving byte slices of data that we want
+		// to upload as a file
+		rpipe, wpipe := io.Pipe()
+		go func() {
+			b := []byte("test")
+			wpipe.Write(b)
+			wpipe.Close()
+		}()
+
+		// Upload the object.
+		objectName := fmt.Sprintf("%sunique%d", bucketName, i)
+		args["objectName"] = objectName
+
+		n, err := c.PutObject(bucketName, objectName, rpipe, -1, minio.PutObjectOptions{})
+		// Close the read end promptly: the previous `defer rpipe.Close()`
+		// inside the loop would not run until the whole function returned.
+		rpipe.Close()
+		if err != nil {
+			logError(testName, function, args, startTime, "", "PutObjectStreaming failed", err)
+			return
+		}
+		args["size"] = n
+		// NOTE: the previous message used string(int), which converts the
+		// integer to a rune (garbage in logs); format the sizes with %d.
+		if n != int64(4) {
+			logError(testName, function, args, startTime, "", fmt.Sprintf("Expected upload object size %d got %d", 4, n), err)
+			return
+		}
+
+	}
+
+	// Delete all objects and buckets
+	if err = cleanupBucket(bucketName, c); err != nil {
+		logError(testName, function, args, startTime, "", "Cleanup failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test put object with 0 byte object.
+// Uploads an empty reader with size 0 through a v2 client and verifies the
+// reported upload size is 0.
+func testPutObject0ByteV2() {
+	// initialize logging params
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "PutObject(bucketName, objectName, reader, size, opts)"
+	args := map[string]interface{}{
+		"bucketName": "",
+		"objectName": "",
+		"size":       0,
+		"opts":       "",
+	}
+
+	// Seed random based on current time.
+	rand.Seed(time.Now().Unix())
+
+	// Instantiate new minio client object.
+	c, err := minio.NewV2(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err)
+		return
+	}
+
+	// Enable tracing, write to stderr.
+	// c.TraceOn(os.Stderr)
+
+	// Set user agent.
+	c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+	// Generate a new random bucket name.
+	bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+	args["bucketName"] = bucketName
+
+	// Make a new bucket.
+	err = c.MakeBucket(bucketName, "us-east-1")
+	if err != nil {
+		logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+		return
+	}
+
+	objectName := bucketName + "unique"
+	args["objectName"] = objectName
+	args["opts"] = minio.PutObjectOptions{}
+
+	// Upload an empty object.
+	n, err := c.PutObject(bucketName, objectName, bytes.NewReader([]byte("")), 0, minio.PutObjectOptions{})
+
+	if err != nil {
+		logError(testName, function, args, startTime, "", "PutObjectWithSize failed", err)
+		return
+	}
+	// NOTE: the previous message used string(n), which converts the integer
+	// to a rune (garbage in logs); format the size with %d instead.
+	if n != 0 {
+		logError(testName, function, args, startTime, "", fmt.Sprintf("Expected upload object size 0 but got %d", n), err)
+		return
+	}
+
+	// Delete all objects and buckets
+	if err = cleanupBucket(bucketName, c); err != nil {
+		logError(testName, function, args, startTime, "", "Cleanup failed", err)
+		return
+	}
+
+	successLogger(testName, function, args, startTime).Info()
+}
+
+// Test ComposeObject error cases against a signature-v4 client.
+func testComposeObjectErrorCases() {
+	// Logging parameters for this test run.
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "ComposeObject(destination, sourceList)"
+	args := map[string]interface{}{}
+
+	// Build a signature-v4 client from the environment configuration.
+	client, err := minio.NewV4(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+		return
+	}
+
+	testComposeObjectErrorCasesWrapper(client)
+}
+
+// Test concatenating many source objects with a signature-v4 client.
+func testCompose10KSources() {
+	// Logging parameters for this test run.
+	startTime := time.Now()
+	testName := getFuncName()
+	function := "ComposeObject(destination, sourceList)"
+	args := map[string]interface{}{}
+
+	// Build a signature-v4 client from the environment configuration.
+	client, err := minio.NewV4(
+		os.Getenv(serverEndpoint),
+		os.Getenv(accessKey),
+		os.Getenv(secretKey),
+		mustParseBool(os.Getenv(enableHTTPS)),
+	)
+	if err != nil {
+		logError(testName, function, args, startTime, "", "Minio client object creation failed", err)
+		return
+	}
+
+	testComposeMultipleSources(client)
+}
+
+// Tests comprehensive list of all methods.
+func testFunctionalV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "testFunctionalV2()"
+ function_all := ""
+ args := map[string]interface{}{}
+
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ c, err := minio.NewV2(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err)
+ return
+ }
+
+ // Enable to debug
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ location := "us-east-1"
+ // Make a new bucket.
+ function = "MakeBucket(bucketName, location)"
+ function_all = "MakeBucket(bucketName, location)"
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "location": location,
+ }
+ err = c.MakeBucket(bucketName, location)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ // Generate a random file name.
+ fileName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ file, err := os.Create(fileName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "file create failed", err)
+ return
+ }
+ for i := 0; i < 3; i++ {
+ buf := make([]byte, rand.Intn(1<<19))
+ _, err = file.Write(buf)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "file write failed", err)
+ return
+ }
+ }
+ file.Close()
+
+ // Verify if bucket exits and you have access.
+ var exists bool
+ function = "BucketExists(bucketName)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ }
+ exists, err = c.BucketExists(bucketName)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "BucketExists failed", err)
+ return
+ }
+ if !exists {
+ logError(testName, function, args, startTime, "", "Could not find existing bucket "+bucketName, err)
+ return
+ }
+
+ // Make the bucket 'public read/write'.
+ function = "SetBucketPolicy(bucketName, objectPrefix, bucketPolicy)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectPrefix": "",
+ "bucketPolicy": policy.BucketPolicyReadWrite,
+ }
+ err = c.SetBucketPolicy(bucketName, "", policy.BucketPolicyReadWrite)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err)
+ return
+ }
+
+ // List all buckets.
+ function = "ListBuckets()"
+ function_all += ", " + function
+ args = nil
+ buckets, err := c.ListBuckets()
+ if len(buckets) == 0 {
+ logError(testName, function, args, startTime, "", "List buckets cannot be empty", err)
+ return
+ }
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ListBuckets failed", err)
+ return
+ }
+
+ // Verify if previously created bucket is listed in list buckets.
+ bucketFound := false
+ for _, bucket := range buckets {
+ if bucket.Name == bucketName {
+ bucketFound = true
+ }
+ }
+
+ // If bucket not found error out.
+ if !bucketFound {
+ logError(testName, function, args, startTime, "", "Bucket "+bucketName+"not found", err)
+ return
+ }
+
+ objectName := bucketName + "unique"
+
+ // Generate data
+ buf := bytes.Repeat([]byte("n"), rand.Intn(1<<19))
+
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "contentType": "",
+ }
+ n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+ if n != int64(len(buf)) {
+ logError(testName, function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(n), err)
+ return
+ }
+
+ objectName_noLength := objectName + "-nolength"
+ args["objectName"] = objectName_noLength
+ n, err = c.PutObject(bucketName, objectName_noLength, bytes.NewReader(buf), int64(len(buf)), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ if n != int64(len(buf)) {
+ logError(testName, function, args, startTime, "", "Expected uploaded object length "+string(len(buf))+" got "+string(n), err)
+ return
+ }
+
+ // Instantiate a done channel to close all listing.
+ doneCh := make(chan struct{})
+ defer close(doneCh)
+
+ objFound := false
+ isRecursive := true // Recursive is true.
+ function = "ListObjects(bucketName, objectName, isRecursive, doneCh)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "isRecursive": isRecursive,
+ }
+ for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) {
+ if obj.Key == objectName {
+ objFound = true
+ break
+ }
+ }
+ if !objFound {
+ logError(testName, function, args, startTime, "", "Could not find existing object "+objectName, err)
+ return
+ }
+
+ incompObjNotFound := true
+ function = "ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "isRecursive": isRecursive,
+ }
+ for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) {
+ if objIncompl.Key != "" {
+ incompObjNotFound = false
+ break
+ }
+ }
+ if !incompObjNotFound {
+ logError(testName, function, args, startTime, "", "Unexpected dangling incomplete upload found", err)
+ return
+ }
+
+ function = "GetObject(bucketName, objectName)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ }
+ newReader, err := c.GetObject(bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ newReadBytes, err := ioutil.ReadAll(newReader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+
+ if !bytes.Equal(newReadBytes, buf) {
+ logError(testName, function, args, startTime, "", "Bytes mismatch", err)
+ return
+ }
+
+ function = "FGetObject(bucketName, objectName, fileName)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "fileName": fileName + "-f",
+ }
+ err = c.FGetObject(bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FgetObject failed", err)
+ return
+ }
+
+ // Generate presigned HEAD object url.
+ function = "PresignedHeadObject(bucketName, objectName, expires, reqParams)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "expires": 3600 * time.Second,
+ }
+ presignedHeadURL, err := c.PresignedHeadObject(bucketName, objectName, 3600*time.Second, nil)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedHeadObject failed", err)
+ return
+ }
+ // Verify if presigned url works.
+ resp, err := http.Head(presignedHeadURL.String())
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedHeadObject URL head request failed", err)
+ return
+ }
+ if resp.StatusCode != http.StatusOK {
+ logError(testName, function, args, startTime, "", "PresignedHeadObject URL returns status "+string(resp.StatusCode), err)
+ return
+ }
+ if resp.Header.Get("ETag") == "" {
+ logError(testName, function, args, startTime, "", "Got empty ETag", err)
+ return
+ }
+ resp.Body.Close()
+
+ // Generate presigned GET object url.
+ function = "PresignedGetObject(bucketName, objectName, expires, reqParams)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName,
+ "expires": 3600 * time.Second,
+ }
+ presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
+ return
+ }
+ // Verify if presigned url works.
+ resp, err = http.Get(presignedGetURL.String())
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject URL GET request failed", err)
+ return
+ }
+ if resp.StatusCode != http.StatusOK {
+ logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err)
+ return
+ }
+ newPresignedBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+ resp.Body.Close()
+ if !bytes.Equal(newPresignedBytes, buf) {
+ logError(testName, function, args, startTime, "", "Bytes mismatch", err)
+ return
+ }
+
+ // Set request parameters.
+ reqParams := make(url.Values)
+ reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
+ // Generate presigned GET object url.
+ args["reqParams"] = reqParams
+ presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject failed", err)
+ return
+ }
+ // Verify if presigned url works.
+ resp, err = http.Get(presignedGetURL.String())
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedGetObject URL GET request failed", err)
+ return
+ }
+ if resp.StatusCode != http.StatusOK {
+ logError(testName, function, args, startTime, "", "PresignedGetObject URL returns status "+string(resp.StatusCode), err)
+ return
+ }
+ newPresignedBytes, err = ioutil.ReadAll(resp.Body)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+ if !bytes.Equal(newPresignedBytes, buf) {
+ logError(testName, function, args, startTime, "", "Bytes mismatch", err)
+ return
+ }
+ // Verify content disposition.
+ if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" {
+ logError(testName, function, args, startTime, "", "wrong Content-Disposition received ", err)
+ return
+ }
+
+ function = "PresignedPutObject(bucketName, objectName, expires)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName + "-presigned",
+ "expires": 3600 * time.Second,
+ }
+ presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PresignedPutObject failed", err)
+ return
+ }
+
+ // Generate data more than 32K
+ buf = bytes.Repeat([]byte("1"), rand.Intn(1<<10)+32*1024)
+
+ req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf))
+ if err != nil {
+ logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err)
+ return
+ }
+ httpClient := &http.Client{
+ // Setting a sensible time out of 30secs to wait for response
+ // headers. Request is pro-actively cancelled after 30secs
+ // with no response.
+ Timeout: 30 * time.Second,
+ Transport: http.DefaultTransport,
+ }
+ resp, err = httpClient.Do(req)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "HTTP request to PresignedPutObject URL failed", err)
+ return
+ }
+
+ function = "GetObject(bucketName, objectName)"
+ function_all += ", " + function
+ args = map[string]interface{}{
+ "bucketName": bucketName,
+ "objectName": objectName + "-presigned",
+ }
+ newReader, err = c.GetObject(bucketName, objectName+"-presigned", minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObject failed", err)
+ return
+ }
+
+ newReadBytes, err = ioutil.ReadAll(newReader)
+ if err != nil {
+ logError(testName, function, args, startTime, "", "ReadAll failed", err)
+ return
+ }
+
+ if !bytes.Equal(newReadBytes, buf) {
+ logError(testName, function, args, startTime, "", "Bytes mismatch", err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ if err = os.Remove(fileName); err != nil {
+ logError(testName, function, args, startTime, "", "File remove failed", err)
+ return
+ }
+ if err = os.Remove(fileName + "-f"); err != nil {
+ logError(testName, function, args, startTime, "", "File removes failed", err)
+ return
+ }
+ successLogger(testName, function_all, args, startTime).Info()
+}
+
+// Test get object with GetObjectWithContext
+func testGetObjectWithContext() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObjectWithContext(ctx, bucketName, objectName)"
+ args := map[string]interface{}{
+ "ctx": "",
+ "bucketName": "",
+ "objectName": "",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.NewV4(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client v4 object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ bufSize := dataFileMap["datafile-33-kB"]
+ var reader = getDataReader("datafile-33-kB")
+ defer reader.Close()
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+ args["ctx"] = ctx
+ defer cancel()
+
+ r, err := c.GetObjectWithContext(ctx, bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObjectWithContext failed unexpectedly", err)
+ return
+ }
+ if _, err = r.Stat(); err == nil {
+ logError(testName, function, args, startTime, "", "GetObjectWithContext should fail on short timeout", err)
+ return
+ }
+
+ ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+ args["ctx"] = ctx
+ defer cancel()
+
+ // Read the data back
+ r, err = c.GetObjectWithContext(ctx, bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObjectWithContext failed", err)
+ return
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "object Stat call failed", err)
+ return
+ }
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes in stat does not match: want "+string(bufSize)+", got"+string(st.Size), err)
+ return
+ }
+ if err := r.Close(); err != nil {
+ logError(testName, function, args, startTime, "", "object Close() call failed", err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+
+}
+
+// Test get object with FGetObjectWithContext
+func testFGetObjectWithContext() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "FGetObjectWithContext(ctx, bucketName, objectName, fileName)"
+ args := map[string]interface{}{
+ "ctx": "",
+ "bucketName": "",
+ "objectName": "",
+ "fileName": "",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.NewV4(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client v4 object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ bufSize := dataFileMap["datafile-1-MB"]
+ var reader = getDataReader("datafile-1-MB")
+ defer reader.Close()
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject failed", err)
+ return
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+ args["ctx"] = ctx
+ defer cancel()
+
+ fileName := "tempfile-context"
+ args["fileName"] = fileName
+ // Read the data back
+ err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
+ if err == nil {
+ logError(testName, function, args, startTime, "", "FGetObjectWithContext should fail on short timeout", err)
+ return
+ }
+ ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+ defer cancel()
+
+ // Read the data back
+ err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FGetObjectWithContext with long timeout failed", err)
+ return
+ }
+ if err = os.Remove(fileName + "-fcontext"); err != nil {
+ logError(testName, function, args, startTime, "", "Remove file failed", err)
+ return
+ }
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+
+}
+
+// Test validates putObject with context to see if request cancellation is honored for V2.
+func testPutObjectWithContextV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "PutObjectWithContext(ctx, bucketName, objectName, reader, size, opts)"
+ args := map[string]interface{}{
+ "ctx": "",
+ "bucketName": "",
+ "objectName": "",
+ "size": "",
+ "opts": "",
+ }
+ // Instantiate new minio client object.
+ c, err := minio.NewV2(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Make a new bucket.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+ defer c.RemoveBucket(bucketName)
+ bufSize := dataFileMap["datatfile-33-kB"]
+ var reader = getDataReader("datafile-33-kB")
+ defer reader.Close()
+
+ objectName := fmt.Sprintf("test-file-%v", rand.Uint32())
+ args["objectName"] = objectName
+
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ args["ctx"] = ctx
+ args["size"] = bufSize
+ defer cancel()
+
+ _, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObjectWithContext with short timeout failed", err)
+ return
+ }
+
+ ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+ args["ctx"] = ctx
+
+ defer cancel()
+ reader = getDataReader("datafile-33-kB")
+ defer reader.Close()
+ _, err = c.PutObjectWithContext(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObjectWithContext with long timeout failed", err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+
+}
+
+// Test get object with GetObjectWithContext
+func testGetObjectWithContextV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "GetObjectWithContext(ctx, bucketName, objectName)"
+ args := map[string]interface{}{
+ "ctx": "",
+ "bucketName": "",
+ "objectName": "",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.NewV2(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket failed", err)
+ return
+ }
+
+ bufSize := dataFileMap["datafile-33-kB"]
+ var reader = getDataReader("datafile-33-kB")
+ defer reader.Close()
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject call failed", err)
+ return
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+ args["ctx"] = ctx
+ defer cancel()
+
+ r, err := c.GetObjectWithContext(ctx, bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObjectWithContext failed unexpectedly", err)
+ return
+ }
+ if _, err = r.Stat(); err == nil {
+ logError(testName, function, args, startTime, "", "GetObjectWithContext should fail on short timeout", err)
+ return
+ }
+
+ ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+ defer cancel()
+
+ // Read the data back
+ r, err = c.GetObjectWithContext(ctx, bucketName, objectName, minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "GetObjectWithContext shouldn't fail on longer timeout", err)
+ return
+ }
+
+ st, err := r.Stat()
+ if err != nil {
+ logError(testName, function, args, startTime, "", "object Stat call failed", err)
+ return
+ }
+ if st.Size != int64(bufSize) {
+ logError(testName, function, args, startTime, "", "Number of bytes in stat does not match, expected "+string(bufSize)+" got "+string(st.Size), err)
+ return
+ }
+ if err := r.Close(); err != nil {
+ logError(testName, function, args, startTime, "", " object Close() call failed", err)
+ return
+ }
+
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+
+}
+
+// Test get object with FGetObjectWithContext
+func testFGetObjectWithContextV2() {
+ // initialize logging params
+ startTime := time.Now()
+ testName := getFuncName()
+ function := "FGetObjectWithContext(ctx, bucketName, objectName,fileName)"
+ args := map[string]interface{}{
+ "ctx": "",
+ "bucketName": "",
+ "objectName": "",
+ "fileName": "",
+ }
+ // Seed random based on current time.
+ rand.Seed(time.Now().Unix())
+
+ // Instantiate new minio client object.
+ c, err := minio.NewV2(
+ os.Getenv(serverEndpoint),
+ os.Getenv(accessKey),
+ os.Getenv(secretKey),
+ mustParseBool(os.Getenv(enableHTTPS)),
+ )
+ if err != nil {
+ logError(testName, function, args, startTime, "", "Minio client v2 object creation failed", err)
+ return
+ }
+
+ // Enable tracing, write to stderr.
+ // c.TraceOn(os.Stderr)
+
+ // Set user agent.
+ c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
+
+ // Generate a new random bucket name.
+ bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-")
+ args["bucketName"] = bucketName
+
+ // Make a new bucket.
+ err = c.MakeBucket(bucketName, "us-east-1")
+ if err != nil {
+ logError(testName, function, args, startTime, "", "MakeBucket call failed", err)
+ return
+ }
+
+ bufSize := dataFileMap["datatfile-1-MB"]
+ var reader = getDataReader("datafile-1-MB")
+ defer reader.Close()
+ // Save the data
+ objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
+ args["objectName"] = objectName
+
+ _, err = c.PutObject(bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "PutObject call failed", err)
+ return
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), 1*time.Nanosecond)
+ args["ctx"] = ctx
+ defer cancel()
+
+ fileName := "tempfile-context"
+ args["fileName"] = fileName
+
+ // Read the data back
+ err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-f", minio.GetObjectOptions{})
+ if err == nil {
+ logError(testName, function, args, startTime, "", "FGetObjectWithContext should fail on short timeout", err)
+ return
+ }
+ ctx, cancel = context.WithTimeout(context.Background(), 1*time.Hour)
+ defer cancel()
+
+ // Read the data back
+ err = c.FGetObjectWithContext(ctx, bucketName, objectName, fileName+"-fcontext", minio.GetObjectOptions{})
+ if err != nil {
+ logError(testName, function, args, startTime, "", "FGetObjectWithContext call shouldn't fail on long timeout", err)
+ return
+ }
+
+ if err = os.Remove(fileName + "-fcontext"); err != nil {
+ logError(testName, function, args, startTime, "", "Remove file failed", err)
+ return
+ }
+ // Delete all objects and buckets
+ if err = cleanupBucket(bucketName, c); err != nil {
+ logError(testName, function, args, startTime, "", "Cleanup failed", err)
+ return
+ }
+
+ successLogger(testName, function, args, startTime).Info()
+
+}
+
// mustParseBool interprets str as a boolean per strconv.ParseBool
// ("1", "t", "true", "0", "f", "false", etc.), returning false instead
// of an error for any unrecognized input.
func mustParseBool(str string) bool {
	if v, parseErr := strconv.ParseBool(str); parseErr == nil {
		return v
	}
	return false
}
+
// main drives the functional test suite. Each test function logs its own
// success (Info) or failure (via logError) as JSON on stdout; main itself
// only configures logging and selects which tests to run.
// NOTE(review): the tests run sequentially and their order is preserved
// here (V2-signature tests first, then V4/default ones).
func main() {
	// Output to stdout instead of the default stderr
	log.SetOutput(os.Stdout)
	// create custom formatter
	mintFormatter := mintJSONFormatter{}
	// set custom formatter
	log.SetFormatter(&mintFormatter)
	// log Info or above -- success cases are Info level, failures are Fatal level
	log.SetLevel(log.InfoLevel)

	// Whether the endpoint is served over TLS; SSE-C tests below require it.
	tls := mustParseBool(os.Getenv(enableHTTPS))
	// execute tests
	// Quick mode runs only the two umbrella functional tests; full mode
	// runs the complete suite.
	if !isQuickMode() {
		testMakeBucketErrorV2()
		testGetObjectClosedTwiceV2()
		testRemovePartiallyUploadedV2()
		testFPutObjectV2()
		testMakeBucketRegionsV2()
		testGetObjectReadSeekFunctionalV2()
		testGetObjectReadAtFunctionalV2()
		testCopyObjectV2()
		testFunctionalV2()
		testComposeObjectErrorCasesV2()
		testCompose10KSourcesV2()
		testUserMetadataCopyingV2()
		testPutObject0ByteV2()
		testPutObjectNoLengthV2()
		testPutObjectsUnknownV2()
		testGetObjectWithContextV2()
		testFPutObjectWithContextV2()
		testFGetObjectWithContextV2()
		testPutObjectWithContextV2()
		testMakeBucketError()
		testMakeBucketRegions()
		testPutObjectWithMetadata()
		testPutObjectReadAt()
		testPutObjectStreaming()
		testListPartiallyUploaded()
		testGetObjectSeekEnd()
		testGetObjectClosedTwice()
		testRemoveMultipleObjects()
		testRemovePartiallyUploaded()
		testFPutObjectMultipart()
		testFPutObject()
		testGetObjectReadSeekFunctional()
		testGetObjectReadAtFunctional()
		testPresignedPostPolicy()
		testCopyObject()
		testEncryptionPutGet()
		testEncryptionFPut()
		testComposeObjectErrorCases()
		testCompose10KSources()
		testUserMetadataCopying()
		testBucketNotification()
		testFunctional()
		testGetObjectModified()
		testPutObjectUploadSeekedObject()
		testGetObjectWithContext()
		testFPutObjectWithContext()
		testFGetObjectWithContext()
		testPutObjectWithContext()

		// SSE-C tests will only work over TLS connection.
		if tls {
			testEncryptedCopyObjectV2()
			testEncryptedCopyObject()
		}
	} else {
		testFunctional()
		testFunctionalV2()
	}
}
diff --git a/request-headers_test.go b/get-options_test.go
index f026cd0..c5344a0 100644
--- a/request-headers_test.go
+++ b/get-options_test.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -40,17 +41,17 @@ func TestSetHeader(t *testing.T) {
{1, -5, fmt.Errorf("Invalid range specified: start=1 end=-5"), ""},
}
for i, testCase := range testCases {
- rh := NewGetReqHeaders()
- err := rh.SetRange(testCase.start, testCase.end)
+ opts := GetObjectOptions{}
+ err := opts.SetRange(testCase.start, testCase.end)
if err == nil && testCase.errVal != nil {
t.Errorf("Test %d: Expected to fail with '%v' but it passed",
i+1, testCase.errVal)
} else if err != nil && testCase.errVal.Error() != err.Error() {
t.Errorf("Test %d: Expected error '%v' but got error '%v'",
i+1, testCase.errVal, err)
- } else if err == nil && rh.Get("Range") != testCase.expected {
+ } else if err == nil && opts.headers["Range"] != testCase.expected {
t.Errorf("Test %d: Expected range header '%s', but got '%s'",
- i+1, testCase.expected, rh.Get("Range"))
+ i+1, testCase.expected, opts.headers["Range"])
}
}
}
diff --git a/hook-reader.go b/hook-reader.go
index bc9ece0..8f32291 100644
--- a/hook-reader.go
+++ b/hook-reader.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/pkg/credentials/chain.go b/pkg/credentials/chain.go
index 6b0e574..e29826f 100644
--- a/pkg/credentials/chain.go
+++ b/pkg/credentials/chain.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,8 +17,6 @@
package credentials
-import "fmt"
-
// A Chain will search for a provider which returns credentials
// and cache that provider until Retrieve is called again.
//
@@ -27,11 +25,11 @@ import "fmt"
// Providers in the list.
//
// If none of the Providers retrieve valid credentials Value, ChainProvider's
-// Retrieve() will return the error, collecting all errors from all providers.
+// Retrieve() will return the no credentials value.
//
// If a Provider is found which returns valid credentials Value ChainProvider
// will cache that Provider for all calls to IsExpired(), until Retrieve is
-// called again.
+// called again after IsExpired() is true.
//
// creds := credentials.NewChainCredentials(
// []credentials.Provider{
@@ -58,28 +56,30 @@ func NewChainCredentials(providers []Provider) *Credentials {
})
}
-// Retrieve returns the credentials value or error if no provider returned
-// without error.
+// Retrieve returns the credentials value, returns no credentials(anonymous)
+// if no credentials provider returned any value.
//
-// If a provider is found it will be cached and any calls to IsExpired()
-// will return the expired state of the cached provider.
+// If a provider is found with credentials, it will be cached and any calls
+// to IsExpired() will return the expired state of the cached provider.
func (c *Chain) Retrieve() (Value, error) {
- var errs []error
for _, p := range c.Providers {
- creds, err := p.Retrieve()
- if err != nil {
- errs = append(errs, err)
+ creds, _ := p.Retrieve()
+ // Always prioritize non-anonymous providers, if any.
+ if creds.AccessKeyID == "" && creds.SecretAccessKey == "" {
continue
- } // Success.
+ }
c.curr = p
return creds, nil
}
- c.curr = nil
- return Value{}, fmt.Errorf("No valid providers found %v", errs)
+ // At this point we have exhausted all the providers and
+ // are left without any credentials return anonymous.
+ return Value{
+ SignerType: SignatureAnonymous,
+ }, nil
}
// IsExpired will returned the expired state of the currently cached provider
-// if there is one. If there is no current provider, true will be returned.
+// if there is one. If there is no current provider, true will be returned.
func (c *Chain) IsExpired() bool {
if c.curr != nil {
return c.curr.IsExpired()
diff --git a/pkg/credentials/chain_test.go b/pkg/credentials/chain_test.go
index cb5a6dd..d26e376 100644
--- a/pkg/credentials/chain_test.go
+++ b/pkg/credentials/chain_test.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -76,7 +76,14 @@ func TestChainGet(t *testing.T) {
}
func TestChainIsExpired(t *testing.T) {
- credProvider := &credProvider{expired: true}
+ credProvider := &credProvider{
+ creds: Value{
+ AccessKeyID: "UXHW",
+ SecretAccessKey: "MYSECRET",
+ SessionToken: "",
+ },
+ expired: true,
+ }
p := &Chain{
Providers: []Provider{
credProvider,
diff --git a/pkg/credentials/credentials.go b/pkg/credentials/credentials.go
index cc30005..4bfdad4 100644
--- a/pkg/credentials/credentials.go
+++ b/pkg/credentials/credentials.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/pkg/credentials/credentials_test.go b/pkg/credentials/credentials_test.go
index cbfb673..92c77c4 100644
--- a/pkg/credentials/credentials_test.go
+++ b/pkg/credentials/credentials_test.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/pkg/credentials/doc.go b/pkg/credentials/doc.go
index fa1908a..c48784b 100644
--- a/pkg/credentials/doc.go
+++ b/pkg/credentials/doc.go
@@ -1,3 +1,20 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
// Package credentials provides credential retrieval and management
// for S3 compatible object storage.
//
diff --git a/pkg/credentials/env_aws.go b/pkg/credentials/env_aws.go
index 1193443..f9b2cc3 100644
--- a/pkg/credentials/env_aws.go
+++ b/pkg/credentials/env_aws.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/pkg/credentials/env_minio.go b/pkg/credentials/env_minio.go
index 791087e..d72e771 100644
--- a/pkg/credentials/env_minio.go
+++ b/pkg/credentials/env_minio.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/pkg/credentials/env_test.go b/pkg/credentials/env_test.go
index 2f72bea..09cd77f 100644
--- a/pkg/credentials/env_test.go
+++ b/pkg/credentials/env_test.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/pkg/credentials/file_aws_credentials.go b/pkg/credentials/file_aws_credentials.go
index 1be6213..5ad6830 100644
--- a/pkg/credentials/file_aws_credentials.go
+++ b/pkg/credentials/file_aws_credentials.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -22,7 +22,7 @@ import (
"path/filepath"
"github.com/go-ini/ini"
- homedir "github.com/minio/go-homedir"
+ homedir "github.com/mitchellh/go-homedir"
)
// A FileAWSCredentials retrieves credentials from the current user's home
diff --git a/pkg/credentials/file_minio_client.go b/pkg/credentials/file_minio_client.go
index 9e26dd3..c282c2a 100644
--- a/pkg/credentials/file_minio_client.go
+++ b/pkg/credentials/file_minio_client.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -24,7 +24,7 @@ import (
"path/filepath"
"runtime"
- homedir "github.com/minio/go-homedir"
+ homedir "github.com/mitchellh/go-homedir"
)
// A FileMinioClient retrieves credentials from the current user's home
diff --git a/pkg/credentials/file_test.go b/pkg/credentials/file_test.go
index c62c533..c85c104 100644
--- a/pkg/credentials/file_test.go
+++ b/pkg/credentials/file_test.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/pkg/credentials/iam_aws.go b/pkg/credentials/iam_aws.go
index b862cf5..637df74 100644
--- a/pkg/credentials/iam_aws.go
+++ b/pkg/credentials/iam_aws.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -46,18 +46,6 @@ type IAM struct {
endpoint string
}
-// redirectHeaders copies all headers when following a redirect URL.
-// This won't be needed anymore from go 1.8 (https://github.com/golang/go/issues/4800)
-func redirectHeaders(req *http.Request, via []*http.Request) error {
- if len(via) == 0 {
- return nil
- }
- for key, val := range via[0].Header {
- req.Header[key] = val
- }
- return nil
-}
-
// IAM Roles for Amazon EC2
// http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
const (
@@ -74,8 +62,7 @@ func NewIAM(endpoint string) *Credentials {
}
p := &IAM{
Client: &http.Client{
- Transport: http.DefaultTransport,
- CheckRedirect: redirectHeaders,
+ Transport: http.DefaultTransport,
},
endpoint: endpoint,
}
diff --git a/pkg/credentials/iam_aws_test.go b/pkg/credentials/iam_aws_test.go
index 3e5ad3e..86ea66b 100644
--- a/pkg/credentials/iam_aws_test.go
+++ b/pkg/credentials/iam_aws_test.go
@@ -1,3 +1,20 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
package credentials
import (
diff --git a/pkg/credentials/signature-type.go b/pkg/credentials/signature-type.go
index c64ad6c..1b768e8 100644
--- a/pkg/credentials/signature-type.go
+++ b/pkg/credentials/signature-type.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/pkg/credentials/static.go b/pkg/credentials/static.go
index 25aff56..8b0ba71 100644
--- a/pkg/credentials/static.go
+++ b/pkg/credentials/static.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/pkg/credentials/static_test.go b/pkg/credentials/static_test.go
index 491b155..f1d2d85 100644
--- a/pkg/credentials/static_test.go
+++ b/pkg/credentials/static_test.go
@@ -1,6 +1,6 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage
- * (C) 2017 Minio, Inc.
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/pkg/encrypt/cbc.go b/pkg/encrypt/cbc.go
index be45e52..b0f2d6e 100644
--- a/pkg/encrypt/cbc.go
+++ b/pkg/encrypt/cbc.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/pkg/encrypt/interface.go b/pkg/encrypt/interface.go
index 8b85543..482922a 100644
--- a/pkg/encrypt/interface.go
+++ b/pkg/encrypt/interface.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/pkg/encrypt/keys.go b/pkg/encrypt/keys.go
index 8814845..0ed95f5 100644
--- a/pkg/encrypt/keys.go
+++ b/pkg/encrypt/keys.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/pkg/policy/bucket-policy-condition.go b/pkg/policy/bucket-policy-condition.go
index 078bcd1..737b810 100644
--- a/pkg/policy/bucket-policy-condition.go
+++ b/pkg/policy/bucket-policy-condition.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/pkg/policy/bucket-policy-condition_test.go b/pkg/policy/bucket-policy-condition_test.go
index 419868f..9e4aa8f 100644
--- a/pkg/policy/bucket-policy-condition_test.go
+++ b/pkg/policy/bucket-policy-condition_test.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/pkg/policy/bucket-policy.go b/pkg/policy/bucket-policy.go
index b2d46e1..9dda99e 100644
--- a/pkg/policy/bucket-policy.go
+++ b/pkg/policy/bucket-policy.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/pkg/policy/bucket-policy_test.go b/pkg/policy/bucket-policy_test.go
index b1862c6..1e5196f 100644
--- a/pkg/policy/bucket-policy_test.go
+++ b/pkg/policy/bucket-policy_test.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/pkg/s3signer/request-signature-streaming.go b/pkg/s3signer/request-signature-streaming.go
index 22059bb..156a6d6 100644
--- a/pkg/s3signer/request-signature-streaming.go
+++ b/pkg/s3signer/request-signature-streaming.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -32,7 +33,6 @@ import (
// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming
const (
streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
- streamingEncoding = "aws-chunked"
streamingPayloadHdr = "AWS4-HMAC-SHA256-PAYLOAD"
emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
payloadChunkSize = 64 * 1024
@@ -99,9 +99,8 @@ func prepareStreamingRequest(req *http.Request, sessionToken string, dataLen int
if sessionToken != "" {
req.Header.Set("X-Amz-Security-Token", sessionToken)
}
- req.Header.Set("Content-Encoding", streamingEncoding)
- req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat))
+ req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat))
// Set content length with streaming signature for each chunk included.
req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize))
req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLen, 10))
diff --git a/pkg/s3signer/request-signature-streaming_test.go b/pkg/s3signer/request-signature-streaming_test.go
index 1f49f22..535adb3 100644
--- a/pkg/s3signer/request-signature-streaming_test.go
+++ b/pkg/s3signer/request-signature-streaming_test.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -42,7 +43,7 @@ func TestGetSeedSignature(t *testing.T) {
req = StreamingSignV4(req, accessKeyID, secretAccessKeyID, "", "us-east-1", int64(dataLen), reqTime)
actualSeedSignature := req.Body.(*StreamingReader).seedSignature
- expectedSeedSignature := "007480502de61457e955731b0f5d191f7e6f54a8a0f6cc7974a5ebd887965686"
+ expectedSeedSignature := "38cab3af09aa15ddf29e26e36236f60fb6bfb6243a20797ae9a8183674526079"
if actualSeedSignature != expectedSeedSignature {
t.Errorf("Expected %s but received %s", expectedSeedSignature, actualSeedSignature)
}
@@ -74,7 +75,7 @@ func TestSetStreamingAuthorization(t *testing.T) {
reqTime, _ := time.Parse(iso8601DateFormat, "20130524T000000Z")
req = StreamingSignV4(req, accessKeyID, secretAccessKeyID, "", location, dataLen, reqTime)
- expectedAuthorization := "AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request,SignedHeaders=content-encoding;host;x-amz-content-sha256;x-amz-date;x-amz-decoded-content-length;x-amz-storage-class,Signature=007480502de61457e955731b0f5d191f7e6f54a8a0f6cc7974a5ebd887965686"
+ expectedAuthorization := "AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20130524/us-east-1/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date;x-amz-decoded-content-length;x-amz-storage-class,Signature=38cab3af09aa15ddf29e26e36236f60fb6bfb6243a20797ae9a8183674526079"
actualAuthorization := req.Header.Get("Authorization")
if actualAuthorization != expectedAuthorization {
diff --git a/pkg/s3signer/request-signature-v2.go b/pkg/s3signer/request-signature-v2.go
index af0e915..620af1c 100644
--- a/pkg/s3signer/request-signature-v2.go
+++ b/pkg/s3signer/request-signature-v2.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -42,9 +43,7 @@ const (
func encodeURL2Path(u *url.URL) (path string) {
// Encode URL path.
if isS3, _ := filepath.Match("*.s3*.amazonaws.com", u.Host); isS3 {
- hostSplits := strings.SplitN(u.Host, ".", 4)
- // First element is the bucket name.
- bucketName := hostSplits[0]
+ bucketName := u.Host[:strings.LastIndex(u.Host, ".s3")]
path = "/" + bucketName
path += u.Path
path = s3utils.EncodePath(path)
@@ -78,7 +77,7 @@ func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires in
}
// Get presigned string to sign.
- stringToSign := preStringifyHTTPReq(req)
+ stringToSign := preStringToSignV2(req)
hm := hmac.New(sha1.New, []byte(secretAccessKey))
hm.Write([]byte(stringToSign))
@@ -147,7 +146,7 @@ func SignV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request
}
// Calculate HMAC for secretAccessKey.
- stringToSign := stringifyHTTPReq(req)
+ stringToSign := stringToSignV2(req)
hm := hmac.New(sha1.New, []byte(secretAccessKey))
hm.Write([]byte(stringToSign))
@@ -172,15 +171,14 @@ func SignV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request
// Expires + "\n" +
// CanonicalizedProtocolHeaders +
// CanonicalizedResource;
-func preStringifyHTTPReq(req http.Request) string {
+func preStringToSignV2(req http.Request) string {
buf := new(bytes.Buffer)
// Write standard headers.
writePreSignV2Headers(buf, req)
// Write canonicalized protocol headers if any.
writeCanonicalizedHeaders(buf, req)
// Write canonicalized Query resources if any.
- isPreSign := true
- writeCanonicalizedResource(buf, req, isPreSign)
+ writeCanonicalizedResource(buf, req)
return buf.String()
}
@@ -200,15 +198,14 @@ func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) {
// Date + "\n" +
// CanonicalizedProtocolHeaders +
// CanonicalizedResource;
-func stringifyHTTPReq(req http.Request) string {
+func stringToSignV2(req http.Request) string {
buf := new(bytes.Buffer)
// Write standard headers.
writeSignV2Headers(buf, req)
// Write canonicalized protocol headers if any.
writeCanonicalizedHeaders(buf, req)
// Write canonicalized Query resources if any.
- isPreSign := false
- writeCanonicalizedResource(buf, req, isPreSign)
+ writeCanonicalizedResource(buf, req)
return buf.String()
}
@@ -255,17 +252,27 @@ func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) {
}
}
-// The following list is already sorted and should always be, otherwise we could
-// have signature-related issues
+// AWS S3 Signature V2 calculation rule is given here:
+// http://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html#RESTAuthenticationStringToSign
+
+// Whitelist resource list that will be used in query string for signature-V2 calculation.
+// The list should be alphabetically sorted
var resourceList = []string{
"acl",
"delete",
+ "lifecycle",
"location",
"logging",
"notification",
"partNumber",
"policy",
"requestPayment",
+ "response-cache-control",
+ "response-content-disposition",
+ "response-content-encoding",
+ "response-content-language",
+ "response-content-type",
+ "response-expires",
"torrent",
"uploadId",
"uploads",
@@ -280,22 +287,11 @@ var resourceList = []string{
// CanonicalizedResource = [ "/" + Bucket ] +
// <HTTP-Request-URI, from the protocol name up to the query string> +
// [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"];
-func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, isPreSign bool) {
+func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request) {
// Save request URL.
requestURL := req.URL
// Get encoded URL path.
- path := encodeURL2Path(requestURL)
- if isPreSign {
- // Get encoded URL path.
- if len(requestURL.Query()) > 0 {
- // Keep the usual queries unescaped for string to sign.
- query, _ := url.QueryUnescape(s3utils.QueryEncode(requestURL.Query()))
- path = path + "?" + query
- }
- buf.WriteString(path)
- return
- }
- buf.WriteString(path)
+ buf.WriteString(encodeURL2Path(requestURL))
if requestURL.RawQuery != "" {
var n int
vals, _ := url.ParseQuery(requestURL.RawQuery)
diff --git a/pkg/s3signer/request-signature-v2_test.go b/pkg/s3signer/request-signature-v2_test.go
index 3c0e0ec..042b6e6 100644
--- a/pkg/s3signer/request-signature-v2_test.go
+++ b/pkg/s3signer/request-signature-v2_test.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/pkg/s3signer/request-signature-v4.go b/pkg/s3signer/request-signature-v4.go
index 0d75dc1..d5721ac 100644
--- a/pkg/s3signer/request-signature-v4.go
+++ b/pkg/s3signer/request-signature-v4.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/pkg/s3signer/request-signature_test.go b/pkg/s3signer/request-signature_test.go
index 85ff063..d53483e 100644
--- a/pkg/s3signer/request-signature_test.go
+++ b/pkg/s3signer/request-signature_test.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/pkg/s3signer/test-utils_test.go b/pkg/s3signer/test-utils_test.go
index 049e581..cf96d66 100644
--- a/pkg/s3signer/test-utils_test.go
+++ b/pkg/s3signer/test-utils_test.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/pkg/s3signer/utils.go b/pkg/s3signer/utils.go
index 0619b30..2924363 100644
--- a/pkg/s3signer/utils.go
+++ b/pkg/s3signer/utils.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/pkg/s3signer/utils_test.go b/pkg/s3signer/utils_test.go
index b266e42..22a2d65 100644
--- a/pkg/s3signer/utils_test.go
+++ b/pkg/s3signer/utils_test.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -25,6 +26,7 @@ import (
// Tests url encoding.
func TestEncodeURL2Path(t *testing.T) {
type urlStrings struct {
+ bucketName string
objName string
encodedObjName string
}
@@ -32,22 +34,27 @@ func TestEncodeURL2Path(t *testing.T) {
bucketName := "bucketName"
want := []urlStrings{
{
+ bucketName: "bucketName",
objName: "本語",
encodedObjName: "%E6%9C%AC%E8%AA%9E",
},
{
+ bucketName: "bucketName",
objName: "本語.1",
encodedObjName: "%E6%9C%AC%E8%AA%9E.1",
},
{
objName: ">123>3123123",
+ bucketName: "bucketName",
encodedObjName: "%3E123%3E3123123",
},
{
+ bucketName: "bucketName",
objName: "test 1 2.txt",
encodedObjName: "test%201%202.txt",
},
{
+ bucketName: "test.bucketName",
objName: "test++ 1.txt",
encodedObjName: "test%2B%2B%201.txt",
},
@@ -63,4 +70,5 @@ func TestEncodeURL2Path(t *testing.T) {
t.Fatal("Error")
}
}
+
}
diff --git a/pkg/s3utils/utils.go b/pkg/s3utils/utils.go
index 9d6ac4d..258390f 100644
--- a/pkg/s3utils/utils.go
+++ b/pkg/s3utils/utils.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -80,6 +81,9 @@ func IsVirtualHostSupported(endpointURL url.URL, bucketName string) bool {
return IsAmazonEndpoint(endpointURL) || IsGoogleEndpoint(endpointURL)
}
+// AmazonS3Host - regular expression used to determine if an arg is s3 host.
+var AmazonS3Host = regexp.MustCompile("^s3[.-]?(.*?)\\.amazonaws\\.com$")
+
// IsAmazonEndpoint - Match if it is exactly Amazon S3 endpoint.
func IsAmazonEndpoint(endpointURL url.URL) bool {
if IsAmazonChinaEndpoint(endpointURL) {
@@ -88,7 +92,7 @@ func IsAmazonEndpoint(endpointURL url.URL) bool {
if IsAmazonGovCloudEndpoint(endpointURL) {
return true
}
- return endpointURL.Host == "s3.amazonaws.com"
+ return AmazonS3Host.MatchString(endpointURL.Host)
}
// IsAmazonGovCloudEndpoint - Match if it is exactly Amazon S3 GovCloud endpoint.
@@ -205,7 +209,7 @@ func EncodePath(pathName string) string {
// We support '.' with bucket names but we fallback to using path
// style requests instead for such buckets.
var (
- validBucketName = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9\.\-]{1,61}[A-Za-z0-9]$`)
+ validBucketName = regexp.MustCompile(`^[A-Za-z0-9][A-Za-z0-9\.\-\_\:]{1,61}[A-Za-z0-9]$`)
validBucketNameStrict = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
ipAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`)
)
@@ -240,14 +244,13 @@ func checkBucketNameCommon(bucketName string, strict bool) (err error) {
}
// CheckValidBucketName - checks if we have a valid input bucket name.
-// This is a non stricter version.
-// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
func CheckValidBucketName(bucketName string) (err error) {
return checkBucketNameCommon(bucketName, false)
}
// CheckValidBucketNameStrict - checks if we have a valid input bucket name.
// This is a stricter version.
+// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
func CheckValidBucketNameStrict(bucketName string) (err error) {
return checkBucketNameCommon(bucketName, true)
}
diff --git a/pkg/s3utils/utils_test.go b/pkg/s3utils/utils_test.go
index 6be701d..f19e688 100644
--- a/pkg/s3utils/utils_test.go
+++ b/pkg/s3utils/utils_test.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -301,10 +302,14 @@ func TestIsValidBucketName(t *testing.T) {
{"", errors.New("Bucket name cannot be empty"), false},
{"my..bucket", errors.New("Bucket name contains invalid characters"), false},
{"192.168.1.168", errors.New("Bucket name cannot be an ip address"), false},
+ {":bucketname", errors.New("Bucket name contains invalid characters"), false},
+ {"_bucketName", errors.New("Bucket name contains invalid characters"), false},
{"my.bucket.com", nil, true},
{"my-bucket", nil, true},
{"123my-bucket", nil, true},
{"Mybucket", nil, true},
+ {"My_bucket", nil, true},
+ {"My:bucket", nil, true},
}
for i, testCase := range testCases {
diff --git a/pkg/set/stringset.go b/pkg/set/stringset.go
index 9f33488..efd0262 100644
--- a/pkg/set/stringset.go
+++ b/pkg/set/stringset.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/pkg/set/stringset_test.go b/pkg/set/stringset_test.go
index e276fec..d7e6aa7 100644
--- a/pkg/set/stringset_test.go
+++ b/pkg/set/stringset_test.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/post-policy.go b/post-policy.go
index 5e71612..b3ae705 100644
--- a/post-policy.go
+++ b/post-policy.go
@@ -1,3 +1,20 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
package minio
import (
@@ -167,6 +184,28 @@ func (p *PostPolicy) SetSuccessStatusAction(status string) error {
return nil
}
+// SetUserMetadata - Set user metadata as a key/value couple.
+// Can be retrieved through a HEAD request or an event.
+func (p *PostPolicy) SetUserMetadata(key string, value string) error {
+ if strings.TrimSpace(key) == "" || key == "" {
+ return ErrInvalidArgument("Key is empty")
+ }
+ if strings.TrimSpace(value) == "" || value == "" {
+ return ErrInvalidArgument("Value is empty")
+ }
+ headerName := fmt.Sprintf("x-amz-meta-%s", key)
+ policyCond := policyCondition{
+ matchType: "eq",
+ condition: fmt.Sprintf("$%s", headerName),
+ value: value,
+ }
+ if err := p.addNewPolicy(policyCond); err != nil {
+ return err
+ }
+ p.formData[headerName] = value
+ return nil
+}
+
// addNewPolicy - internal helper to validate adding new policies.
func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error {
if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" {
diff --git a/retry-continous.go b/retry-continous.go
index e300af6..f31dfa6 100644
--- a/retry-continous.go
+++ b/retry-continous.go
@@ -1,3 +1,20 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
package minio
import "time"
diff --git a/retry.go b/retry.go
index 1de5107..2c8ceda 100644
--- a/retry.go
+++ b/retry.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/s3-endpoints.go b/s3-endpoints.go
index c02f3f1..2a86eaa 100644
--- a/s3-endpoints.go
+++ b/s3-endpoints.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/s3-error.go b/s3-error.go
index c5aff9b..f9e8233 100644
--- a/s3-error.go
+++ b/s3-error.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/test-utils_test.go b/test-utils_test.go
index 4134af9..6f6443c 100644
--- a/test-utils_test.go
+++ b/test-utils_test.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -64,11 +65,11 @@ func encodeResponse(response interface{}) []byte {
return bytesBuffer.Bytes()
}
-// Convert string to bool and always return true if any error
+// Convert string to bool and always return false if any error
func mustParseBool(str string) bool {
b, err := strconv.ParseBool(str)
if err != nil {
- return true
+ return false
}
return b
}
diff --git a/transport.go b/transport.go
new file mode 100644
index 0000000..e2dafe1
--- /dev/null
+++ b/transport.go
@@ -0,0 +1,48 @@
+// +build go1.7 go1.8
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2017 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "net"
+ "net/http"
+ "time"
+)
+
+// This default transport is similar to http.DefaultTransport
+// but with additional DisableCompression:
+var defaultMinioTransport http.RoundTripper = &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ DualStack: true,
+ }).DialContext,
+ MaxIdleConns: 100,
+ IdleConnTimeout: 90 * time.Second,
+ TLSHandshakeTimeout: 10 * time.Second,
+ ExpectContinueTimeout: 1 * time.Second,
+ // Set this value so that the underlying transport round-tripper
+ // doesn't try to auto decode the body of objects with
+ // content-encoding set to `gzip`.
+ //
+ // Refer:
+ // https://golang.org/src/net/http/transport.go?h=roundTrip#L1843
+ DisableCompression: true,
+}
diff --git a/utils.go b/utils.go
index 6f54639..a8ff8cf 100644
--- a/utils.go
+++ b/utils.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,6 +20,8 @@ package minio
import (
"crypto/md5"
"crypto/sha256"
+ "encoding/base64"
+ "encoding/hex"
"encoding/xml"
"io"
"io/ioutil"
@@ -38,18 +41,18 @@ func xmlDecoder(body io.Reader, v interface{}) error {
return d.Decode(v)
}
-// sum256 calculate sha256 sum for an input byte array.
-func sum256(data []byte) []byte {
+// sum256Hex calculates the sha256sum of an input byte array and returns it hex encoded.
+func sum256Hex(data []byte) string {
 	hash := sha256.New()
 	hash.Write(data)
-	return hash.Sum(nil)
+	return hex.EncodeToString(hash.Sum(nil))
 }
-// sumMD5 calculate sumMD5 sum for an input byte array.
-func sumMD5(data []byte) []byte {
+// sumMD5Base64 calculates the md5sum of an input byte array and returns it base64 encoded.
+func sumMD5Base64(data []byte) string {
 	hash := md5.New()
 	hash.Write(data)
-	return hash.Sum(nil)
+	return base64.StdEncoding.EncodeToString(hash.Sum(nil))
 }
// getEndpointURL - construct a new endpoint.
@@ -109,10 +112,13 @@ func closeResponse(resp *http.Response) {
}
}
-var emptySHA256 = sum256(nil)
+var (
+	// emptySHA256Hex is the hex encoding of the sha256sum of zero-length (nil) input.
+	emptySHA256Hex = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
-// Sentinel URL is the default url value which is invalid.
-var sentinelURL = url.URL{}
+	// sentinelURL is the zero-value url.URL, used as an invalid sentinel default.
+	sentinelURL = url.URL{}
+)
// Verify if input endpoint URL is valid.
func isValidEndpointURL(endpointURL url.URL) error {
@@ -212,3 +218,70 @@ func getDefaultLocation(u url.URL, regionOverride string) (location string) {
// Default to location to 'us-east-1'.
return "us-east-1"
}
+
+var supportedHeaders = []string{
+	"content-type",
+	"cache-control",
+	"content-encoding",
+	"content-disposition",
+	// Add more supported headers here.
+}
+
+// cseHeaders is the list of client-side encryption headers.
+var cseHeaders = []string{
+	"X-Amz-Iv",
+	"X-Amz-Key",
+	"X-Amz-Matdesc",
+}
+
+// isStandardHeader returns true if header is a supported header and not a custom header.
+func isStandardHeader(headerKey string) bool {
+	key := strings.ToLower(headerKey)
+	for _, header := range supportedHeaders {
+		if strings.ToLower(header) == key {
+			return true
+		}
+	}
+	return false
+}
+
+// isCSEHeader returns true if header is a client-side encryption header.
+func isCSEHeader(headerKey string) bool {
+	key := strings.ToLower(headerKey)
+	for _, h := range cseHeaders {
+		header := strings.ToLower(h)
+		if (header == key) ||
+			(("x-amz-meta-" + header) == key) {
+			return true
+		}
+	}
+	return false
+}
+
+// sseHeaders is the list of server-side encryption headers.
+var sseHeaders = []string{
+	"x-amz-server-side-encryption",
+	"x-amz-server-side-encryption-aws-kms-key-id",
+	"x-amz-server-side-encryption-context",
+	"x-amz-server-side-encryption-customer-algorithm",
+	"x-amz-server-side-encryption-customer-key",
+	"x-amz-server-side-encryption-customer-key-MD5",
+}
+
+// isSSEHeader returns true if header is a server-side encryption header.
+func isSSEHeader(headerKey string) bool {
+	key := strings.ToLower(headerKey)
+	for _, h := range sseHeaders {
+		if strings.ToLower(h) == key {
+			return true
+		}
+	}
+	return false
+}
+
+// isAmzHeader returns true if header is an x-amz-meta-* or x-amz-acl header.
+func isAmzHeader(headerKey string) bool {
+	key := strings.ToLower(headerKey)
+
+	return strings.HasPrefix(key, "x-amz-meta-") || key == "x-amz-acl"
+}
diff --git a/utils_test.go b/utils_test.go
index ba29711..719ee4b 100644
--- a/utils_test.go
+++ b/utils_test.go
@@ -1,5 +1,6 @@
/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage
+ * Copyright 2015-2017 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -13,6 +14,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
package minio
import (
@@ -289,3 +291,105 @@ func TestIsValidBucketName(t *testing.T) {
}
}
+
+// TestIsStandardHeader verifies detection of standard supported headers.
+func TestIsStandardHeader(t *testing.T) {
+	testCases := []struct {
+		// Input.
+		header string
+		// Expected result.
+		expectedValue bool
+	}{
+		{"content-encoding", true},
+		{"content-type", true},
+		{"cache-control", true},
+		{"content-disposition", true},
+		{"random-header", false},
+	}
+
+	for i, testCase := range testCases {
+		actual := isStandardHeader(testCase.header)
+		if actual != testCase.expectedValue {
+			t.Errorf("Test %d: Expected to pass, but failed", i+1)
+		}
+	}
+
+}
+
+// TestIsSSEHeader verifies detection of server-side encryption headers.
+func TestIsSSEHeader(t *testing.T) {
+	testCases := []struct {
+		// Input.
+		header string
+		// Expected result.
+		expectedValue bool
+	}{
+		{"x-amz-server-side-encryption", true},
+		{"x-amz-server-side-encryption-aws-kms-key-id", true},
+		{"x-amz-server-side-encryption-context", true},
+		{"x-amz-server-side-encryption-customer-algorithm", true},
+		{"x-amz-server-side-encryption-customer-key", true},
+		{"x-amz-server-side-encryption-customer-key-MD5", true},
+		{"random-header", false},
+	}
+
+	for i, testCase := range testCases {
+		actual := isSSEHeader(testCase.header)
+		if actual != testCase.expectedValue {
+			t.Errorf("Test %d: Expected to pass, but failed", i+1)
+		}
+	}
+}
+
+// TestIsCSEHeader verifies detection of client-side encryption headers.
+func TestIsCSEHeader(t *testing.T) {
+	testCases := []struct {
+		// Input.
+		header string
+		// Expected result.
+		expectedValue bool
+	}{
+		{"x-amz-iv", true},
+		{"x-amz-key", true},
+		{"x-amz-matdesc", true},
+		{"x-amz-meta-x-amz-iv", true},
+		{"x-amz-meta-x-amz-key", true},
+		{"x-amz-meta-x-amz-matdesc", true},
+		{"random-header", false},
+	}
+
+	for i, testCase := range testCases {
+		actual := isCSEHeader(testCase.header)
+		if actual != testCase.expectedValue {
+			t.Errorf("Test %d: Expected to pass, but failed", i+1)
+		}
+	}
+
+}
+
+// TestIsAmzHeader verifies detection of x-amz-meta-* and x-amz-acl headers.
+func TestIsAmzHeader(t *testing.T) {
+	testCases := []struct {
+		// Input.
+		header string
+		// Expected result.
+		expectedValue bool
+	}{
+		{"x-amz-iv", false},
+		{"x-amz-key", false},
+		{"x-amz-matdesc", false},
+		{"x-amz-meta-x-amz-iv", true},
+		{"x-amz-meta-x-amz-key", true},
+		{"x-amz-meta-x-amz-matdesc", true},
+		{"x-amz-acl", true},
+		{"random-header", false},
+	}
+
+	for i, testCase := range testCases {
+		actual := isAmzHeader(testCase.header)
+		if actual != testCase.expectedValue {
+			t.Errorf("Test %d: Expected to pass, but failed", i+1)
+		}
+	}
+
+}