-rw-r--r--  .travis.yml  2
-rw-r--r--  INSTALLGO.md  83
-rw-r--r--  README.md  205
-rw-r--r--  api-error-response.go  20
-rw-r--r--  api-error-response_test.go  16
-rw-r--r--  api-get-object-file.go  2
-rw-r--r--  api-get-object.go  433
-rw-r--r--  api-get-policy.go  67
-rw-r--r--  api-get-policy_test.go  102
-rw-r--r--  api-list.go  11
-rw-r--r--  api-notification.go  215
-rw-r--r--  api-put-bucket.go  97
-rw-r--r--  api-put-object-copy.go  2
-rw-r--r--  api-put-object-file.go  179
-rw-r--r--  api-put-object-multipart.go  33
-rw-r--r--  api-put-object-progress.go  7
-rw-r--r--  api-put-object-readat.go  225
-rw-r--r--  api-put-object.go  3
-rw-r--r--  api-remove.go  122
-rw-r--r--  api-s3-datatypes.go  35
-rw-r--r--  api-stat.go  13
-rw-r--r--  api.go  4
-rw-r--r--  api_functional_v2_test.go  1310
-rw-r--r--  api_functional_v4_test.go  1380
-rw-r--r--  api_unit_test.go  55
-rw-r--r--  bucket-cache.go  25
-rw-r--r--  bucket-cache_test.go  2
-rw-r--r--  bucket-notification.go  228
-rw-r--r--  bucket-policy.go  618
-rw-r--r--  bucket-policy_test.go  645
-rw-r--r--  constants.go  8
-rw-r--r--  copy-conditions.go  8
-rw-r--r--  docs/API.md (renamed from API.md)  748
-rw-r--r--  examples/minio/listenbucketnotification.go  59
-rw-r--r--  examples/s3/bucketexists.go  8
-rw-r--r--  examples/s3/getbucketnotification.go  55
-rw-r--r--  examples/s3/listbucketpolicies.go  56
-rw-r--r--  examples/s3/putobject-progress.go  2
-rw-r--r--  examples/s3/removeallbucketnotification.go  49
-rw-r--r--  examples/s3/removeobjects.go  61
-rw-r--r--  examples/s3/setbucketnotification.go  85
-rw-r--r--  examples/s3/setbucketpolicy.go  11
-rw-r--r--  pkg/policy/bucket-policy-condition.go  115
-rw-r--r--  pkg/policy/bucket-policy-condition_test.go  289
-rw-r--r--  pkg/policy/bucket-policy.go  635
-rw-r--r--  pkg/policy/bucket-policy_test.go  1822
-rw-r--r--  pkg/set/stringset.go  196
-rw-r--r--  pkg/set/stringset_test.go  322
-rw-r--r--  request-signature-v2.go  1
-rw-r--r--  request-signature-v4.go  2
-rw-r--r--  s3-endpoints.go  2
-rw-r--r--  utils.go  13
-rw-r--r--  utils_test.go  26
53 files changed, 5801 insertions, 4911 deletions
diff --git a/.travis.yml b/.travis.yml
index 876ef9e..f61da45 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -3,7 +3,7 @@ language: go
os:
- linux
-- osx
+- osx
env:
- ARCH=x86_64
diff --git a/INSTALLGO.md b/INSTALLGO.md
deleted file mode 100644
index 81c3d53..0000000
--- a/INSTALLGO.md
+++ /dev/null
@@ -1,83 +0,0 @@
-## Ubuntu (Kylin) 14.04
-### Build Dependencies
-This installation guide is based on Ubuntu 14.04+ on x86-64 platform.
-
-##### Install Git, GCC
-```sh
-$ sudo apt-get install git build-essential
-```
-
-##### Install Go 1.5+
-
-Download Go 1.5+ from [https://golang.org/dl/](https://golang.org/dl/).
-
-```sh
-$ wget https://storage.googleapis.com/golang/go1.5.1.linux-amd64.tar.gz
-$ mkdir -p ${HOME}/bin/
-$ mkdir -p ${HOME}/go/
-$ tar -C ${HOME}/bin/ -xzf go1.5.1.linux-amd64.tar.gz
-```
-##### Setup GOROOT and GOPATH
-
-Add the following exports to your ``~/.bashrc``. Environment variable GOROOT specifies the location of your golang binaries
-and GOPATH specifies the location of your project workspace.
-
-```sh
-export GOROOT=${HOME}/bin/go
-export GOPATH=${HOME}/go
-export PATH=$PATH:${HOME}/bin/go/bin:${GOPATH}/bin
-```
-```sh
-$ source ~/.bashrc
-```
-
-##### Testing it all
-```sh
-$ go env
-```
-
-## OS X (Yosemite) 10.10
-### Build Dependencies
-This installation document assumes OS X Yosemite 10.10+ on x86-64 platform.
-
-##### Install brew
-```sh
-$ ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
-```
-
-##### Install Git, Python
-```sh
-$ brew install git python
-```
-
-##### Install Go 1.5+
-
-Install golang binaries using `brew`
-
-```sh
-$ brew install go
-$ mkdir -p $HOME/go
-```
-
-##### Setup GOROOT and GOPATH
-
-Add the following exports to your ``~/.bash_profile``. Environment variable GOROOT specifies the location of your golang binaries
-and GOPATH specifies the location of your project workspace.
-
-```sh
-export GOPATH=${HOME}/go
-export GOVERSION=$(brew list go | head -n 1 | cut -d '/' -f 6)
-export GOROOT=$(brew --prefix)/Cellar/go/${GOVERSION}/libexec
-export PATH=$PATH:${GOPATH}/bin
-```
-
-##### Source the new environment
-
-```sh
-$ source ~/.bash_profile
-```
-
-##### Testing it all
-```sh
-$ go env
-```
diff --git a/README.md b/README.md
index 50b7926..16ed886 100644
--- a/README.md
+++ b/README.md
@@ -1,62 +1,73 @@
# Minio Golang Library for Amazon S3 Compatible Cloud Storage [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/Minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
The Minio Golang Client SDK provides simple APIs to access any Amazon S3 compatible object storage server.
-**List of supported cloud storage providers.**
+**Supported cloud storage providers:**
+
- AWS Signature Version 4
- Amazon S3
- Minio
+
+
- AWS Signature Version 2
- Google Cloud Storage (Compatibility Mode)
- Openstack Swift + Swift3 middleware
- Ceph Object Gateway
- Riak CS
-This quickstart guide will show you how to install the client SDK and execute an example Golang program. For a complete list of APIs and examples, please take a look at the [Golang Client API Reference](https://docs.minio.io/docs/golang-client-api-reference) documentation.
+This quickstart guide shows you how to install the Minio client SDK, connect to Minio, and build a simple file uploader. For a complete list of APIs and examples, please take a look at the [Golang Client API Reference](https://docs.minio.io/docs/golang-client-api-reference).
-This document assumes that you have a working [Golang](https://docs.minio.io/docs/how-to-install-golang) setup in place.
+This document assumes that you have a working [Golang setup](https://docs.minio.io/docs/how-to-install-golang).
## Download from Github
```sh
+
$ go get -u github.com/minio/minio-go
+
```
## Initialize Minio Client
-You need four items in order to connect to Minio object storage server.
+You need four items to connect to a Minio object storage server.
-| Params | Description|
+
+| Parameter | Description|
| :--- | :--- |
| endpoint | URL to object storage service. |
-| accessKeyID | Access key is like user ID that uniquely identifies your account. |
+| accessKeyID | Access key is the user ID that uniquely identifies your account. |
| secretAccessKey | Secret key is the password to your account. |
-|secure | Set this value to 'true' to enable secure (HTTPS) access. |
+| secure | Set this value to 'true' to enable secure (HTTPS) access. |
```go
+
package main
import (
- "fmt"
-
- "github.com/minio/minio-go"
+ "github.com/minio/minio-go"
+ "log"
)
func main() {
- // Use a secure connection.
- ssl := true
-
- // Initialize minio client object.
- minioClient, err := minio.New("play.minio.io:9000", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ssl)
- if err != nil {
- fmt.Println(err)
- return
- }
-}
+ endpoint := "play.minio.io:9000"
+ accessKeyID := "Q3AM3UQ867SPQQA43P2F"
+ secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+ useSSL := true
+
+ // Initialize minio client object.
+ minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ log.Printf("%v\n", minioClient) // minioClient is now setup
+}
```
## Quick Start Example - File Uploader
+
This example program connects to an object storage server, makes a bucket on the server and then uploads a file to the bucket.
@@ -65,117 +76,165 @@ This example program connects to an object storage server, makes a bucket on the
We will use the Minio server running at [https://play.minio.io:9000](https://play.minio.io:9000) in this example. Feel free to use this service for testing and development. Access credentials shown in this example are open to the public.
#### FileUploader.go
+
```go
package main
-import "fmt"
import (
- "log"
-
- "github.com/minio/minio-go"
+ "github.com/minio/minio-go"
+ "log"
)
func main() {
- // Use a secure connection.
- ssl := true
-
- // Initialize minio client object.
- minioClient, err := minio.New("play.minio.io:9000", "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", ssl)
- if err != nil {
- log.Fatalln(err)
- }
-
- // Make a new bucket called mymusic.
- err = minioClient.MakeBucket("mymusic", "us-east-1")
- if err != nil {
- log.Fatalln(err)
- }
- fmt.Println("Successfully created mymusic")
-
- // Upload the zip file with FPutObject.
- n, err := minioClient.FPutObject("mymusic", "golden-oldies.zip", "/tmp/golden-oldies.zip", "application/zip")
- if err != nil {
- log.Fatalln(err)
- }
- log.Printf("Successfully uploaded golden-oldies.zip of size %d\n", n)
+ endpoint := "play.minio.io:9000"
+ accessKeyID := "Q3AM3UQ867SPQQA43P2F"
+ secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+ useSSL := true
+
+ // Initialize minio client object.
+ minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // Make a new bucket called mymusic.
+ bucketName := "mymusic"
+ location := "us-east-1"
+
+ err = minioClient.MakeBucket(bucketName, location)
+ if err != nil {
+ // Check to see if we already own this bucket (which happens if you run this twice)
+ exists, err := minioClient.BucketExists(bucketName)
+ if err == nil && exists {
+ log.Printf("We already own %s\n", bucketName)
+ } else {
+ log.Fatalln(err)
+ }
+ }
+ log.Printf("Successfully created %s\n", bucketName)
+
+ // Upload the zip file
+ objectName := "golden-oldies.zip"
+ filePath := "/tmp/golden-oldies.zip"
+ contentType := "application/zip"
+
+ // Upload the zip file with FPutObject
+ n, err := minioClient.FPutObject(bucketName, objectName, filePath, contentType)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ log.Printf("Successfully uploaded %s of size %d\n", objectName, n)
}
```
+
#### Run FileUploader
-```bash
+
+```sh
+
$ go run file-uploader.go
-$ Successfully created mymusic
-$ Successfully uploaded golden-oldies.zip of size 17MiB
+2016/08/13 17:03:28 Successfully created mymusic
+2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413
$ mc ls play/mymusic/
[2016-05-27 16:02:16 PDT] 17MiB golden-oldies.zip
+
```
## API Reference
+
The full API Reference is available here.
+
* [Complete API Reference](https://docs.minio.io/docs/golang-client-api-reference)
### API Reference : Bucket Operations
+
* [`MakeBucket`](https://docs.minio.io/docs/golang-client-api-reference#MakeBucket)
* [`ListBuckets`](https://docs.minio.io/docs/golang-client-api-reference#ListBuckets)
* [`BucketExists`](https://docs.minio.io/docs/golang-client-api-reference#BucketExists)
* [`RemoveBucket`](https://docs.minio.io/docs/golang-client-api-reference#RemoveBucket)
* [`ListObjects`](https://docs.minio.io/docs/golang-client-api-reference#ListObjects)
-* ListObjectsV2
+* [`ListObjectsV2`](https://docs.minio.io/docs/golang-client-api-reference#ListObjectsV2)
* [`ListIncompleteUploads`](https://docs.minio.io/docs/golang-client-api-reference#ListIncompleteUploads)
### API Reference : Bucket policy Operations
+
* [`SetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketPolicy)
* [`GetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketPolicy)
+* [`ListBucketPolicies`](https://docs.minio.io/docs/golang-client-api-reference#ListBucketPolicies)
+
+### API Reference : Bucket notification Operations
+
+* [`SetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketNotification)
+* [`GetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketNotification)
+* [`RemoveAllBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#RemoveAllBucketNotification)
+* [`ListenBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#ListenBucketNotification) (Minio Extension)
### API Reference : File Object Operations
+
* [`FPutObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject)
* [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FGetObject)
-* [`CopyObject`](https://docs.minio.io/docs/golang-client-api-reference#CopyObject)
### API Reference : Object Operations
+
* [`GetObject`](https://docs.minio.io/docs/golang-client-api-reference#GetObject)
* [`PutObject`](https://docs.minio.io/docs/golang-client-api-reference#PutObject)
* [`StatObject`](https://docs.minio.io/docs/golang-client-api-reference#StatObject)
+* [`CopyObject`](https://docs.minio.io/docs/golang-client-api-reference#CopyObject)
* [`RemoveObject`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObject)
+* [`RemoveObjects`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObjects)
* [`RemoveIncompleteUpload`](https://docs.minio.io/docs/golang-client-api-reference#RemoveIncompleteUpload)
### API Reference : Presigned Operations
+
* [`PresignedGetObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedGetObject)
* [`PresignedPutObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPutObject)
* [`PresignedPostPolicy`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPostPolicy)
-
## Full Examples
#### Full Examples : Bucket Operations
-* [listbuckets.go](./examples/s3/listbuckets.go)
-* [listobjects.go](./examples/s3/listobjects.go)
-* [bucketexists.go](./examples/s3/bucketexists.go)
-* [makebucket.go](./examples/s3/makebucket.go)
-* [removebucket.go](./examples/s3/removebucket.go)
-* [listincompleteuploads.go](./examples/s3/listincompleteuploads.go)
+
+* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go)
+* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go)
+* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go)
+* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go)
+* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go)
+* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go)
+* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go)
#### Full Examples : Bucket policy Operations
-* [setbucketpolicy.go](./examples/s3/setbucketpolicy.go)
-* [getbucketpolicy.go](./examples/s3/getbucketpolicy.go)
+
+* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go)
+* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go)
+* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go)
+#### Full Examples : Bucket notification Operations
+
+* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go)
+* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go)
+* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go)
+* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (Minio Extension)
+
#### Full Examples : File Object Operations
-* [fputobject.go](./examples/s3/fputobject.go)
-* [fgetobject.go](./examples/s3/fgetobject.go)
-* [copyobject.go](./examples/s3/copyobject.go)
+
+* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go)
+* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go)
#### Full Examples : Object Operations
-* [putobject.go](./examples/s3/putobject.go)
-* [getobject.go](./examples/s3/getobject.go)
-* [listobjects.go](./examples/s3/listobjects.go)
-* [listobjectsV2.go](./examples/s3/listobjectsV2.go)
-* [removeobject.go](./examples/s3/removeobject.go)
-* [statobject.go](./examples/s3/statobject.go)
+
+* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go)
+* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go)
+* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go)
+* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go)
+* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go)
+* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go)
+* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go)
#### Full Examples : Presigned Operations
-* [presignedgetobject.go](./examples/s3/presignedgetobject.go)
-* [presignedputobject.go](./examples/s3/presignedputobject.go)
-* [presignedpostpolicy.go](./examples/s3/presignedpostpolicy.go)
+* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
+* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go)
+* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go)
## Explore Further
* [Complete Documentation](https://docs.minio.io)
@@ -184,7 +243,7 @@ The full API Reference is available here.
## Contribute
-[Contributors Guide](./CONTRIBUTING.md)
+[Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md)
[![Build Status](https://travis-ci.org/minio/minio-go.svg)](https://travis-ci.org/minio/minio-go)
[![Build status](https://ci.appveyor.com/api/projects/status/1d05e6nvxcelmrak?svg=true)](https://ci.appveyor.com/project/harshavardhana/minio-go)
diff --git a/api-error-response.go b/api-error-response.go
index 3bfff44..bcfad37 100644
--- a/api-error-response.go
+++ b/api-error-response.go
@@ -201,16 +201,6 @@ func ErrInvalidObjectName(message string) error {
}
}
-// ErrInvalidParts - Invalid number of parts.
-func ErrInvalidParts(expectedParts, uploadedParts int) error {
- msg := fmt.Sprintf("Unexpected number of parts found Want %d, Got %d", expectedParts, uploadedParts)
- return ErrorResponse{
- Code: "InvalidParts",
- Message: msg,
- RequestID: "minio",
- }
-}
-
// ErrInvalidObjectPrefix - Invalid object prefix response is
// similar to object name response.
var ErrInvalidObjectPrefix = ErrInvalidObjectName
@@ -233,3 +223,13 @@ func ErrNoSuchBucketPolicy(message string) error {
RequestID: "minio",
}
}
+
+// ErrAPINotSupported - API not supported response
+// The specified API call is not supported
+func ErrAPINotSupported(message string) error {
+ return ErrorResponse{
+ Code: "APINotSupported",
+ Message: message,
+ RequestID: "minio",
+ }
+}
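A note on usage: callers can branch on this new error code through the package's exported `ToErrorResponse` helper, which appears elsewhere in this diff. A minimal sketch; the wrapper function below is ours, not part of the commit:

```go
package example

import minio "github.com/minio/minio-go"

// isAPINotSupported reports whether err carries the "APINotSupported"
// code introduced above. ToErrorResponse converts a generic error back
// into the package's ErrorResponse type.
func isAPINotSupported(err error) bool {
	return minio.ToErrorResponse(err).Code == "APINotSupported"
}
```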
diff --git a/api-error-response_test.go b/api-error-response_test.go
index a4e5bdc..11f5716 100644
--- a/api-error-response_test.go
+++ b/api-error-response_test.go
@@ -97,7 +97,7 @@ func TestHttpRespToErrorResponse(t *testing.T) {
}
// Generate http response with empty body.
- // Set the StatusCode to the arugment supplied.
+ // Set the StatusCode to the argument supplied.
// Sets common headers.
genEmptyBodyResponse := func(statusCode int) *http.Response {
resp := &http.Response{}
@@ -249,20 +249,6 @@ func TestErrInvalidObjectName(t *testing.T) {
}
}
-// Test validates 'ErrInvalidParts' error response.
-func TestErrInvalidParts(t *testing.T) {
- msg := fmt.Sprintf("Unexpected number of parts found Want %d, Got %d", 10, 9)
- expectedResult := ErrorResponse{
- Code: "InvalidParts",
- Message: msg,
- RequestID: "minio",
- }
- actualResult := ErrInvalidParts(10, 9)
- if !reflect.DeepEqual(expectedResult, actualResult) {
- t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
- }
-}
-
// Test validates 'ErrInvalidArgument' response.
func TestErrInvalidArgument(t *testing.T) {
expectedResult := ErrorResponse{
diff --git a/api-get-object-file.go b/api-get-object-file.go
index 265a58e..a38fc85 100644
--- a/api-get-object-file.go
+++ b/api-get-object-file.go
@@ -48,7 +48,7 @@ func (c Client) FGetObject(bucketName, objectName, filePath string) error {
}
}
- // Extract top level direcotry.
+ // Extract top level directory.
objectDir, _ := filepath.Split(filePath)
if objectDir != "" {
// Create any missing top level directories.
diff --git a/api-get-object.go b/api-get-object.go
index 0603682..1f0dabb 100644
--- a/api-get-object.go
+++ b/api-get-object.go
@@ -36,16 +36,13 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
return nil, err
}
- // Start the request as soon Get is initiated.
- httpReader, objectInfo, err := c.getObject(bucketName, objectName, 0, 0)
- if err != nil {
- return nil, err
- }
-
+ var httpReader io.ReadCloser
+ var objectInfo ObjectInfo
+ var err error
// Create request channel.
- reqCh := make(chan readRequest)
+ reqCh := make(chan getRequest)
// Create response channel.
- resCh := make(chan readResponse)
+ resCh := make(chan getResponse)
// Create done channel.
doneCh := make(chan struct{})
@@ -61,58 +58,148 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
case <-doneCh:
// Close the http response body before returning.
// This ends the connection with the server.
- httpReader.Close()
+ if httpReader != nil {
+ httpReader.Close()
+ }
return
- // Request message.
+
+ // Gather incoming request.
case req := <-reqCh:
- // Offset changes fetch the new object at an Offset.
- if req.DidOffsetChange {
- if httpReader != nil {
- // Close previously opened http reader.
- httpReader.Close()
+ // If this is the first request we may not need to do a getObject request yet.
+ if req.isFirstReq {
+ // First request is a Read/ReadAt.
+ if req.isReadOp {
+ // Differentiate between wanting the whole object and just a range.
+ if req.isReadAt {
+ // If this is a ReadAt request only get the specified range.
+ // Range is set with respect to the offset and length of the buffer requested.
+ // Do not set objectInfo from the first readAt request because it will not get
+ // the whole object.
+ httpReader, _, err = c.getObject(bucketName, objectName, req.Offset, int64(len(req.Buffer)))
+ } else {
+ // First request is a Read request.
+ httpReader, objectInfo, err = c.getObject(bucketName, objectName, req.Offset, 0)
+ }
+ if err != nil {
+ resCh <- getResponse{
+ Error: err,
+ }
+ return
+ }
+ // Read at least firstReq.Buffer bytes, if not we have
+ // reached our EOF.
+ size, err := io.ReadFull(httpReader, req.Buffer)
+ if err == io.ErrUnexpectedEOF {
+ // If an EOF happens after reading some but not
+ // all the bytes ReadFull returns ErrUnexpectedEOF
+ err = io.EOF
+ }
+ // Send back the first response.
+ resCh <- getResponse{
+ objectInfo: objectInfo,
+ Size: int(size),
+ Error: err,
+ didRead: true,
+ }
+ } else {
+ // First request is a Stat or Seek call.
+ // Only need to run a StatObject until an actual Read or ReadAt request comes through.
+ objectInfo, err = c.StatObject(bucketName, objectName)
+ if err != nil {
+ resCh <- getResponse{
+ Error: err,
+ }
+ // Exit the go-routine.
+ return
+ }
+ // Send back the first response.
+ resCh <- getResponse{
+ objectInfo: objectInfo,
+ }
}
- // Read from offset.
- httpReader, _, err = c.getObject(bucketName, objectName, req.Offset, 0)
+ } else if req.settingObjectInfo { // Request is just to get objectInfo.
+ objectInfo, err := c.StatObject(bucketName, objectName)
if err != nil {
- resCh <- readResponse{
+ resCh <- getResponse{
Error: err,
}
+ // Exit the goroutine.
return
}
- }
+ // Send back the objectInfo.
+ resCh <- getResponse{
+ objectInfo: objectInfo,
+ }
+ } else {
+ // Offset changes fetch the new object at an Offset.
+ // Because the httpReader may not be set by the first
+ // request (if that was a Stat or Seek), we open a new
+ // reader only when the object has not been read yet.
+ // All readAt requests are new requests.
+ if req.DidOffsetChange || !req.beenRead {
+ if httpReader != nil {
+ // Close previously opened http reader.
+ httpReader.Close()
+ }
+ // If this request is a readAt only get the specified range.
+ if req.isReadAt {
+ // Range is set with respect to the offset and length of the buffer requested.
+ httpReader, _, err = c.getObject(bucketName, objectName, req.Offset, int64(len(req.Buffer)))
+ } else {
+ httpReader, objectInfo, err = c.getObject(bucketName, objectName, req.Offset, 0)
+ }
+ if err != nil {
+ resCh <- getResponse{
+ Error: err,
+ }
+ return
+ }
+ }
- // Read at least req.Buffer bytes, if not we have
- // reached our EOF.
- size, err := io.ReadFull(httpReader, req.Buffer)
- if err == io.ErrUnexpectedEOF {
- // If an EOF happens after reading some but not
- // all the bytes ReadFull returns ErrUnexpectedEOF
- err = io.EOF
- }
- // Reply back how much was read.
- resCh <- readResponse{
- Size: int(size),
- Error: err,
+ // Read at least req.Buffer bytes, if not we have
+ // reached our EOF.
+ size, err := io.ReadFull(httpReader, req.Buffer)
+ if err == io.ErrUnexpectedEOF {
+ // If an EOF happens after reading some but not
+ // all the bytes ReadFull returns ErrUnexpectedEOF
+ err = io.EOF
+ }
+ // Reply back how much was read.
+ resCh <- getResponse{
+ Size: int(size),
+ Error: err,
+ didRead: true,
+ objectInfo: objectInfo,
+ }
}
}
}
}()
- // Return the readerAt backed by routine.
- return newObject(reqCh, resCh, doneCh, objectInfo), nil
-}
-// Read response message container to reply back for the request.
-type readResponse struct {
- Size int
- Error error
+ // Create a newObject through the information sent back by reqCh.
+ return newObject(reqCh, resCh, doneCh), nil
}
-// Read request message container to communicate with internal
+// get request message container to communicate with internal
// go-routine.
-type readRequest struct {
- Buffer []byte
- Offset int64 // readAt offset.
- DidOffsetChange bool
+type getRequest struct {
+ Buffer []byte
+ Offset int64 // readAt offset.
+ DidOffsetChange bool // Tracks the offset changes for Seek requests.
+ beenRead bool // Determines if this is the first time an object is being read.
+ isReadAt bool // Determines if this request is a request to a specific range.
+ isReadOp bool // Determines if this request is a Read or ReadAt request.
+ isFirstReq bool // Determines if this request is the first time an object is being accessed.
+ settingObjectInfo bool // Determines if this request is to set the objectInfo of an object.
+}
+
+// get response message container to reply back for the request.
+type getResponse struct {
+ Size int
+ Error error
+ didRead bool // Lets subsequent calls know whether or not httpReader has been initiated.
+ objectInfo ObjectInfo // Used for the first request.
}
// Object represents an open object. It implements Read, ReadAt,
@@ -122,8 +209,8 @@ type Object struct {
mutex *sync.Mutex
// User allocated and defined.
- reqCh chan<- readRequest
- resCh <-chan readResponse
+ reqCh chan<- getRequest
+ resCh <-chan getResponse
doneCh chan<- struct{}
prevOffset int64
currOffset int64
@@ -132,8 +219,60 @@ type Object struct {
// Keeps track of closed call.
isClosed bool
+ // Keeps track of if this is the first call.
+ isStarted bool
+
// Previous error saved for future calls.
prevErr error
+
+ // Keeps track of if this object has been read yet.
+ beenRead bool
+
+ // Keeps track of if objectInfo has been set yet.
+ objectInfoSet bool
+}
+
+// doGetRequest - sends a request over the object's reqCh and blocks on resCh.
+// Returns back the size of the buffer read, if anything was read, as well
+// as any error encountered. For all first requests sent on the object
+// it is also responsible for sending back the objectInfo.
+func (o *Object) doGetRequest(request getRequest) (getResponse, error) {
+ o.reqCh <- request
+ response := <-o.resCh
+ // This was the first request.
+ if !o.isStarted {
+ // The object has been operated on.
+ o.isStarted = true
+ }
+ // Set the objectInfo if the request was not readAt
+ // and it hasn't been set before.
+ if !o.objectInfoSet && !request.isReadAt {
+ o.objectInfo = response.objectInfo
+ o.objectInfoSet = true
+ }
+ // Set beenRead only if it has not been set before.
+ if !o.beenRead {
+ o.beenRead = response.didRead
+ }
+ // Return any error to the top level.
+ if response.Error != nil {
+ return response, response.Error
+ }
+ return response, nil
+}
+
+// setOffset - handles the setting of offsets for
+// Read/ReadAt/Seek requests.
+func (o *Object) setOffset(bytesRead int64) error {
+ // Update the currentOffset.
+ o.currOffset += bytesRead
+ // Save the current offset as previous offset.
+ o.prevOffset = o.currOffset
+
+ if o.currOffset >= o.objectInfo.Size {
+ return io.EOF
+ }
+ return nil
}
// Read reads up to len(p) bytes into p. It returns the number of
@@ -152,16 +291,17 @@ func (o *Object) Read(b []byte) (n int, err error) {
if o.prevErr != nil || o.isClosed {
return 0, o.prevErr
}
-
- // If current offset has reached Size limit, return EOF.
- if o.currOffset >= o.objectInfo.Size {
- return 0, io.EOF
+ // Create a new request.
+ readReq := getRequest{
+ isReadOp: true,
+ beenRead: o.beenRead,
+ Buffer: b,
}
- // Send current information over control channel to indicate we are ready.
- reqMsg := readRequest{}
- // Send the pointer to the buffer over the channel.
- reqMsg.Buffer = b
+ // Alert that this is the first request.
+ if !o.isStarted {
+ readReq.isFirstReq = true
+ }
// Verify if offset has changed and currOffset is greater than
// previous offset. Perhaps due to Seek().
@@ -171,42 +311,36 @@ func (o *Object) Read(b []byte) (n int, err error) {
}
if offsetChange > 0 {
// Fetch the new reader at the current offset again.
- reqMsg.Offset = o.currOffset
- reqMsg.DidOffsetChange = true
+ readReq.Offset = o.currOffset
+ readReq.DidOffsetChange = true
} else {
// No offset changes no need to fetch new reader, continue
// reading.
- reqMsg.DidOffsetChange = false
- reqMsg.Offset = 0
+ readReq.DidOffsetChange = false
+ readReq.Offset = 0
}
- // Send read request over the control channel.
- o.reqCh <- reqMsg
-
- // Get data over the response channel.
- dataMsg := <-o.resCh
+ // Send and receive from the first request.
+ response, err := o.doGetRequest(readReq)
+ if err != nil && err != io.EOF {
+ // Save the error for future calls.
+ o.prevErr = err
+ return response.Size, err
+ }
// Bytes read.
- bytesRead := int64(dataMsg.Size)
-
- // Update current offset.
- o.currOffset += bytesRead
-
- // Save the current offset as previous offset.
- o.prevOffset = o.currOffset
-
- if dataMsg.Error == nil {
- // If currOffset read is equal to objectSize
- // We have reached end of file, we return io.EOF.
- if o.currOffset >= o.objectInfo.Size {
- return dataMsg.Size, io.EOF
- }
- return dataMsg.Size, nil
+ bytesRead := int64(response.Size)
+
+ // Set the new offset.
+ oerr := o.setOffset(bytesRead)
+ if oerr != nil {
+ // Save the error for future calls.
+ o.prevErr = oerr
+ return response.Size, oerr
}
- // Save any error.
- o.prevErr = dataMsg.Error
- return dataMsg.Size, dataMsg.Error
+ // Return the response.
+ return response.Size, err
}
// Stat returns the ObjectInfo structure describing object.
@@ -218,10 +352,25 @@ func (o *Object) Stat() (ObjectInfo, error) {
o.mutex.Lock()
defer o.mutex.Unlock()
- if o.prevErr != nil || o.isClosed {
+ if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed {
return ObjectInfo{}, o.prevErr
}
+ // This is the first request.
+ if !o.isStarted || !o.objectInfoSet {
+ statReq := getRequest{
+ isFirstReq: !o.isStarted,
+ settingObjectInfo: !o.objectInfoSet,
+ }
+
+ // Send the request and get the response.
+ _, err := o.doGetRequest(statReq)
+ if err != nil {
+ o.prevErr = err
+ return ObjectInfo{}, err
+ }
+ }
+
return o.objectInfo, nil
}
@@ -243,56 +392,57 @@ func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {
return 0, o.prevErr
}
- // If offset is negative and offset is greater than or equal to
- // object size we return EOF.
- if offset < 0 || offset >= o.objectInfo.Size {
- return 0, io.EOF
+ // Can only compare offsets to size when size has been set.
+ if o.objectInfoSet {
+ // If offset is negative than we return io.EOF.
+ // If offset is greater than or equal to object size we return io.EOF.
+ if offset >= o.objectInfo.Size || offset < 0 {
+ return 0, io.EOF
+ }
}
- // Send current information over control channel to indicate we
- // are ready.
- reqMsg := readRequest{}
-
- // Send the offset and pointer to the buffer over the channel.
- reqMsg.Buffer = b
-
- // For ReadAt offset always changes, minor optimization where
- // offset same as currOffset we don't change the offset.
- reqMsg.DidOffsetChange = offset != o.currOffset
- if reqMsg.DidOffsetChange {
- // Set new offset.
- reqMsg.Offset = offset
- // Save new offset as current offset.
- o.currOffset = offset
+ // Create the new readAt request.
+ readAtReq := getRequest{
+ isReadOp: true,
+ isReadAt: true,
+ DidOffsetChange: true, // Offset always changes.
+ beenRead: o.beenRead, // Set if this is the first request to try and read.
+ Offset: offset, // Set the offset.
+ Buffer: b,
}
- // Send read request over the control channel.
- o.reqCh <- reqMsg
-
- // Get data over the response channel.
- dataMsg := <-o.resCh
+ // Alert that this is the first request.
+ if !o.isStarted {
+ readAtReq.isFirstReq = true
+ }
+ // Send and receive from the first request.
+ response, err := o.doGetRequest(readAtReq)
+ if err != nil && err != io.EOF {
+ // Save the error.
+ o.prevErr = err
+ return response.Size, err
+ }
// Bytes read.
- bytesRead := int64(dataMsg.Size)
-
- // Update current offset.
- o.currOffset += bytesRead
-
- // Save current offset as previous offset before returning.
- o.prevOffset = o.currOffset
-
- if dataMsg.Error == nil {
- // If currentOffset is equal to objectSize
- // we have reached end of file, we return io.EOF.
- if o.currOffset >= o.objectInfo.Size {
- return dataMsg.Size, io.EOF
+ bytesRead := int64(response.Size)
+ // There is no valid objectInfo yet
+ // to compare against for EOF.
+ if !o.objectInfoSet {
+ // Update the currentOffset.
+ o.currOffset += bytesRead
+ // Save the current offset as previous offset.
+ o.prevOffset = o.currOffset
+ } else {
+ // If this was not the first request update
+ // the offsets and compare against objectInfo
+ // for EOF.
+ oerr := o.setOffset(bytesRead)
+ if oerr != nil {
+ o.prevErr = oerr
+ return response.Size, oerr
}
- return dataMsg.Size, nil
}
-
- // Save any error.
- o.prevErr = dataMsg.Error
- return dataMsg.Size, dataMsg.Error
+ return response.Size, err
}
// Seek sets the offset for the next Read or Write to offset,
@@ -325,6 +475,23 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
return 0, ErrInvalidArgument(fmt.Sprintf("Negative position not allowed for %d.", whence))
}
+ // This is the first request. So before anything else
+ // get the ObjectInfo.
+ if !o.isStarted || !o.objectInfoSet {
+ // Create the new Seek request.
+ seekReq := getRequest{
+ isReadOp: false,
+ Offset: offset,
+ isFirstReq: true,
+ }
+ // Send and receive from the seek request.
+ _, err := o.doGetRequest(seekReq)
+ if err != nil {
+ // Save the error.
+ o.prevErr = err
+ return 0, err
+ }
+ }
// Save current offset as previous offset.
o.prevOffset = o.currOffset
@@ -353,7 +520,12 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
if o.objectInfo.Size+offset < 0 {
return 0, ErrInvalidArgument(fmt.Sprintf("Seeking at negative offset not allowed for %d", whence))
}
- o.currOffset += offset
+ o.currOffset = o.objectInfo.Size + offset
+ }
+ // Reset the saved error since the Seek succeeded; the next Read
+ // or ReadAt will rediscover EOF if it still applies.
+ if o.prevErr == io.EOF {
+ o.prevErr = nil
}
// Return the effective offset.
return o.currOffset, nil
@@ -386,13 +558,13 @@ func (o *Object) Close() (err error) {
}
// newObject instantiates a new *minio.Object*
-func newObject(reqCh chan<- readRequest, resCh <-chan readResponse, doneCh chan<- struct{}, objectInfo ObjectInfo) *Object {
+// Its objectInfo is populated lazily by the first getRequest/getResponse exchange.
+func newObject(reqCh chan<- getRequest, resCh <-chan getResponse, doneCh chan<- struct{}) *Object {
return &Object{
- mutex: &sync.Mutex{},
- reqCh: reqCh,
- resCh: resCh,
- doneCh: doneCh,
- objectInfo: objectInfo,
+ mutex: &sync.Mutex{},
+ reqCh: reqCh,
+ resCh: resCh,
+ doneCh: doneCh,
}
}
@@ -414,6 +586,7 @@ func (c Client) getObject(bucketName, objectName string, offset, length int64) (
customHeader := make(http.Header)
// Set ranges if length and offset are valid.
+ // See https://tools.ietf.org/html/rfc7233#section-3.1 for reference.
if length > 0 && offset >= 0 {
customHeader.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1))
} else if offset > 0 && length == 0 {
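The net effect of the rewrite: `Stat()` and `Seek()` no longer force an immediate GET of the object; the ranged GET is deferred until the first `Read`/`ReadAt`. A minimal sketch of the new access pattern, reusing the README's play.minio.io credentials (bucket and object names are hypothetical, so upload something first for this to succeed):

```go
package main

import (
	"io"
	"log"

	"github.com/minio/minio-go"
)

func main() {
	// Client setup mirrors the README quickstart.
	minioClient, err := minio.New("play.minio.io:9000",
		"Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG", true)
	if err != nil {
		log.Fatalln(err)
	}

	obj, err := minioClient.GetObject("mymusic", "golden-oldies.zip")
	if err != nil {
		log.Fatalln(err)
	}
	defer obj.Close()

	// Stat issues only a StatObject request; no object data is fetched yet.
	st, err := obj.Stat()
	if err != nil {
		log.Fatalln(err)
	}

	// Seek is also lazy; the ranged GET happens on the first Read.
	if _, err = obj.Seek(st.Size-10, 0); err != nil {
		log.Fatalln(err)
	}
	buf := make([]byte, 10)
	n, err := io.ReadFull(obj, buf)
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("read %d trailing bytes", n)
}
```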
diff --git a/api-get-policy.go b/api-get-policy.go
index 1004461..07b1fa4 100644
--- a/api-get-policy.go
+++ b/api-get-policy.go
@@ -17,31 +17,48 @@
package minio
import (
- "io"
+ "encoding/json"
"io/ioutil"
"net/http"
"net/url"
- "sort"
+
+ "github.com/minio/minio-go/pkg/policy"
)
// GetBucketPolicy - get bucket policy at a given path.
-func (c Client) GetBucketPolicy(bucketName, objectPrefix string) (bucketPolicy BucketPolicy, err error) {
+func (c Client) GetBucketPolicy(bucketName, objectPrefix string) (bucketPolicy policy.BucketPolicy, err error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return policy.BucketPolicyNone, err
+ }
+ if err := isValidObjectPrefix(objectPrefix); err != nil {
+ return policy.BucketPolicyNone, err
+ }
+ policyInfo, err := c.getBucketPolicy(bucketName, objectPrefix)
+ if err != nil {
+ return policy.BucketPolicyNone, err
+ }
+ return policy.GetPolicy(policyInfo.Statements, bucketName, objectPrefix), nil
+}
+
+// ListBucketPolicies - list all policy rules at a given path.
+func (c Client) ListBucketPolicies(bucketName, objectPrefix string) (bucketPolicies map[string]policy.BucketPolicy, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
- return BucketPolicyNone, err
+ return map[string]policy.BucketPolicy{}, err
}
if err := isValidObjectPrefix(objectPrefix); err != nil {
- return BucketPolicyNone, err
+ return map[string]policy.BucketPolicy{}, err
}
- policy, err := c.getBucketPolicy(bucketName, objectPrefix)
+ policyInfo, err := c.getBucketPolicy(bucketName, objectPrefix)
if err != nil {
- return BucketPolicyNone, err
+ return map[string]policy.BucketPolicy{}, err
}
- return identifyPolicyType(policy, bucketName, objectPrefix), nil
+ return policy.GetPolicies(policyInfo.Statements, bucketName), nil
}
// Request server for policy.
-func (c Client) getBucketPolicy(bucketName string, objectPrefix string) (BucketAccessPolicy, error) {
+func (c Client) getBucketPolicy(bucketName string, objectPrefix string) (policy.BucketAccessPolicy, error) {
// Get resources properly escaped and lined up before
// using them in http request.
urlValues := make(url.Values)
@@ -55,38 +72,24 @@ func (c Client) getBucketPolicy(bucketName string, objectPrefix string) (BucketA
defer closeResponse(resp)
if err != nil {
- return BucketAccessPolicy{}, err
+ return policy.BucketAccessPolicy{}, err
}
- return processBucketPolicyResponse(bucketName, resp)
-
-}
-// processes the GetPolicy http response from the server.
-func processBucketPolicyResponse(bucketName string, resp *http.Response) (BucketAccessPolicy, error) {
if resp != nil {
if resp.StatusCode != http.StatusOK {
errResponse := httpRespToErrorResponse(resp, bucketName, "")
if ToErrorResponse(errResponse).Code == "NoSuchBucketPolicy" {
- return BucketAccessPolicy{Version: "2012-10-17"}, nil
+ return policy.BucketAccessPolicy{Version: "2012-10-17"}, nil
}
- return BucketAccessPolicy{}, errResponse
+ return policy.BucketAccessPolicy{}, errResponse
}
}
- // Read access policy up to maxAccessPolicySize.
- // http://docs.aws.amazon.com/AmazonS3/latest/dev/access-policy-language-overview.html
- // bucket policies are limited to 20KB in size, using a limit reader.
- bucketPolicyBuf, err := ioutil.ReadAll(io.LimitReader(resp.Body, maxAccessPolicySize))
+ bucketPolicyBuf, err := ioutil.ReadAll(resp.Body)
if err != nil {
- return BucketAccessPolicy{}, err
+ return policy.BucketAccessPolicy{}, err
}
- policy, err := unMarshalBucketPolicy(bucketPolicyBuf)
- if err != nil {
- return BucketAccessPolicy{}, err
- }
- // Sort the policy actions and resources for convenience.
- for _, statement := range policy.Statements {
- sort.Strings(statement.Actions)
- sort.Strings(statement.Resources)
- }
- return policy, nil
+
+ policy := policy.BucketAccessPolicy{}
+ err = json.Unmarshal(bucketPolicyBuf, &policy)
+ return policy, err
}
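With the policy engine moved into `pkg/policy`, callers get and set prefix-level policies through `policy.BucketPolicy` values. A minimal sketch, continuing from the README's `minioClient`; bucket and prefix are hypothetical, `policy.BucketPolicyReadOnly` is assumed to exist alongside the `policy.BucketPolicyNone` constant visible in this diff, and the new `SetBucketPolicy` signature appears later in `api-put-bucket.go`:

```go
import "github.com/minio/minio-go/pkg/policy"

// Grant anonymous read access under the "downloads" prefix.
if err := minioClient.SetBucketPolicy("mybucket", "downloads", policy.BucketPolicyReadOnly); err != nil {
	log.Fatalln(err)
}

// Read back the effective policy at that prefix.
pol, err := minioClient.GetBucketPolicy("mybucket", "downloads")
if err != nil {
	log.Fatalln(err)
}
log.Println("policy at downloads:", pol)

// Enumerate the policies of every prefix in the bucket.
pols, err := minioClient.ListBucketPolicies("mybucket", "")
if err != nil {
	log.Fatalln(err)
}
for resource, p := range pols {
	log.Println(resource, "=>", p)
}
```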
diff --git a/api-get-policy_test.go b/api-get-policy_test.go
deleted file mode 100644
index a15f535..0000000
--- a/api-get-policy_test.go
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "bytes"
- "encoding/json"
- "io/ioutil"
- "net/http"
- "reflect"
- "testing"
-)
-
-// Mocks valid http response containing bucket policy from server.
-func generatePolicyResponse(resp *http.Response, policy BucketAccessPolicy) (*http.Response, error) {
- policyBytes, err := json.Marshal(policy)
- if err != nil {
- return nil, err
- }
- resp.StatusCode = http.StatusOK
- resp.Body = ioutil.NopCloser(bytes.NewBuffer(policyBytes))
- return resp, nil
-}
-
-// Tests the processing of GetPolicy response from server.
-func TestProcessBucketPolicyResopnse(t *testing.T) {
- bucketAccesPolicies := []BucketAccessPolicy{
- {Version: "1.0"},
- {Version: "1.0", Statements: setReadOnlyStatement("minio-bucket", "")},
- {Version: "1.0", Statements: setReadWriteStatement("minio-bucket", "Asia/")},
- {Version: "1.0", Statements: setWriteOnlyStatement("minio-bucket", "Asia/India/")},
- }
-
- APIErrors := []APIError{
- {
- Code: "NoSuchBucketPolicy",
- Description: "The specified bucket does not have a bucket policy.",
- HTTPStatusCode: http.StatusNotFound,
- },
- }
- testCases := []struct {
- bucketName string
- isAPIError bool
- apiErr APIError
- // expected results.
- expectedResult BucketAccessPolicy
- err error
- // flag indicating whether tests should pass.
- shouldPass bool
- }{
- {"my-bucket", true, APIErrors[0], BucketAccessPolicy{Version: "2012-10-17"}, nil, true},
- {"my-bucket", false, APIError{}, bucketAccesPolicies[0], nil, true},
- {"my-bucket", false, APIError{}, bucketAccesPolicies[1], nil, true},
- {"my-bucket", false, APIError{}, bucketAccesPolicies[2], nil, true},
- {"my-bucket", false, APIError{}, bucketAccesPolicies[3], nil, true},
- }
-
- for i, testCase := range testCases {
- inputResponse := &http.Response{}
- var err error
- if testCase.isAPIError {
- inputResponse = generateErrorResponse(inputResponse, testCase.apiErr, testCase.bucketName)
- } else {
- inputResponse, err = generatePolicyResponse(inputResponse, testCase.expectedResult)
- if err != nil {
- t.Fatalf("Test %d: Creation of valid response failed", i+1)
- }
- }
- actualResult, err := processBucketPolicyResponse("my-bucket", inputResponse)
- if err != nil && testCase.shouldPass {
- t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
- }
- if err == nil && !testCase.shouldPass {
- t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
- }
- // Failed as expected, but does it fail for the expected reason.
- if err != nil && !testCase.shouldPass {
- if err.Error() != testCase.err.Error() {
- t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
- }
- }
- if err == nil && testCase.shouldPass {
- if !reflect.DeepEqual(testCase.expectedResult, actualResult) {
- t.Errorf("Test %d: The expected BucketPolicy doesnt match the actual BucketPolicy", i+1)
- }
- }
- }
-}
diff --git a/api-list.go b/api-list.go
index 60c03e3..795de61 100644
--- a/api-list.go
+++ b/api-list.go
@@ -84,6 +84,8 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d
// If recursive we do not delimit.
delimiter = ""
}
+ // Return object owner information by default
+ fetchOwner := true
// Validate bucket name.
if err := isValidBucketName(bucketName); err != nil {
defer close(objectStatCh)
@@ -108,7 +110,7 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d
var continuationToken string
for {
// Get list of objects a maximum of 1000 per request.
- result, err := c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, delimiter, 1000)
+ result, err := c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, 1000)
if err != nil {
objectStatCh <- ObjectInfo{
Err: err,
@@ -166,7 +168,7 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d
// ?delimiter - A delimiter is a character you use to group keys.
// ?prefix - Limits the response to keys that begin with the specified prefix.
// ?max-keys - Sets the maximum number of keys returned in the response body.
-func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken, delimiter string, maxkeys int) (listBucketV2Result, error) {
+func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int) (listBucketV2Result, error) {
// Validate bucket name.
if err := isValidBucketName(bucketName); err != nil {
return listBucketV2Result{}, err
@@ -195,6 +197,11 @@ func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken,
urlValues.Set("delimiter", delimiter)
}
+ // Fetch owner when listing
+ if fetchOwner {
+ urlValues.Set("fetch-owner", "true")
+ }
+
// maxkeys should default to 1000 or less.
if maxkeys == 0 || maxkeys > 1000 {
maxkeys = 1000
diff --git a/api-notification.go b/api-notification.go
new file mode 100644
index 0000000..85e5780
--- /dev/null
+++ b/api-notification.go
@@ -0,0 +1,215 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "bufio"
+ "encoding/json"
+ "io"
+ "net/http"
+ "net/url"
+)
+
+// GetBucketNotification - get bucket notification at a given path.
+func (c Client) GetBucketNotification(bucketName string) (bucketNotification BucketNotification, err error) {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return BucketNotification{}, err
+ }
+ notification, err := c.getBucketNotification(bucketName)
+ if err != nil {
+ return BucketNotification{}, err
+ }
+ return notification, nil
+}
+
+// Request server for notification rules.
+func (c Client) getBucketNotification(bucketName string) (BucketNotification, error) {
+ urlValues := make(url.Values)
+ urlValues.Set("notification", "")
+
+ // Execute GET on bucket to fetch the notification configuration.
+ resp, err := c.executeMethod("GET", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ })
+
+ defer closeResponse(resp)
+ if err != nil {
+ return BucketNotification{}, err
+ }
+ return processBucketNotificationResponse(bucketName, resp)
+
+}
+
+// processes the GetNotification http response from the server.
+func processBucketNotificationResponse(bucketName string, resp *http.Response) (BucketNotification, error) {
+ if resp.StatusCode != http.StatusOK {
+ errResponse := httpRespToErrorResponse(resp, bucketName, "")
+ return BucketNotification{}, errResponse
+ }
+ var bucketNotification BucketNotification
+ err := xmlDecoder(resp.Body, &bucketNotification)
+ if err != nil {
+ return BucketNotification{}, err
+ }
+ return bucketNotification, nil
+}
+
+// identity represents the user ID; this is a compliance field.
+type identity struct {
+ PrincipalID string `json:"principalId"`
+}
+
+// Notification event bucket metadata.
+type bucketMeta struct {
+ Name string `json:"name"`
+ OwnerIdentity identity `json:"ownerIdentity"`
+ ARN string `json:"arn"`
+}
+
+// Notification event object metadata.
+type objectMeta struct {
+ Key string `json:"key"`
+ Size int64 `json:"size,omitempty"`
+ ETag string `json:"eTag,omitempty"`
+ VersionID string `json:"versionId,omitempty"`
+ Sequencer string `json:"sequencer"`
+}
+
+// Notification event server specific metadata.
+type eventMeta struct {
+ SchemaVersion string `json:"s3SchemaVersion"`
+ ConfigurationID string `json:"configurationId"`
+ Bucket bucketMeta `json:"bucket"`
+ Object objectMeta `json:"object"`
+}
+
+// NotificationEvent represents an Amazon S3 bucket notification event.
+type NotificationEvent struct {
+ EventVersion string `json:"eventVersion"`
+ EventSource string `json:"eventSource"`
+ AwsRegion string `json:"awsRegion"`
+ EventTime string `json:"eventTime"`
+ EventName string `json:"eventName"`
+ UserIdentity identity `json:"userIdentity"`
+ RequestParameters map[string]string `json:"requestParameters"`
+ ResponseElements map[string]string `json:"responseElements"`
+ S3 eventMeta `json:"s3"`
+}
+
+// NotificationInfo - represents the collection of notification events, additionally
+// also reports errors if any while listening on bucket notifications.
+type NotificationInfo struct {
+ Records []NotificationEvent
+ Err error
+}
+
+// ListenBucketNotification - listen on bucket notifications.
+func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, events []string, doneCh <-chan struct{}) <-chan NotificationInfo {
+ notificationInfoCh := make(chan NotificationInfo, 1)
+ // Start a goroutine that reads the notification stream line by line.
+ go func(notificationInfoCh chan<- NotificationInfo) {
+ defer close(notificationInfoCh)
+
+ // Validate the bucket name.
+ if err := isValidBucketName(bucketName); err != nil {
+ notificationInfoCh <- NotificationInfo{
+ Err: err,
+ }
+ return
+ }
+
+ // Check ARN partition to verify if listening bucket is supported
+ if isAmazonEndpoint(c.endpointURL) || isGoogleEndpoint(c.endpointURL) {
+ notificationInfoCh <- NotificationInfo{
+ Err: ErrAPINotSupported("Listening bucket notification is specific only to `minio` partitions"),
+ }
+ return
+ }
+
+ // Continuously run and listen on bucket notifications.
+ for {
+ urlValues := make(url.Values)
+ urlValues.Set("prefix", prefix)
+ urlValues.Set("suffix", suffix)
+ urlValues["events"] = events
+
+ // Execute GET on bucket to listen for notifications.
+ resp, err := c.executeMethod("GET", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ })
+ if err != nil {
+ notificationInfoCh <- NotificationInfo{
+ Err: err,
+ }
+ return
+ }
+
+ // Validate http response, upon error return quickly.
+ if resp.StatusCode != http.StatusOK {
+ errResponse := httpRespToErrorResponse(resp, bucketName, "")
+ notificationInfoCh <- NotificationInfo{
+ Err: errResponse,
+ }
+ return
+ }
+
+ // Initialize a new bufio scanner, to read line by line.
+ bio := bufio.NewScanner(resp.Body)
+
+ // Close the response body.
+ defer resp.Body.Close()
+
+ // Unmarshal each line, returns marshalled values.
+ for bio.Scan() {
+ var notificationInfo NotificationInfo
+ if err = json.Unmarshal(bio.Bytes(), &notificationInfo); err != nil {
+ notificationInfoCh <- NotificationInfo{
+ Err: err,
+ }
+ return
+ }
+ // Send notifications on channel only if there are events received.
+ if len(notificationInfo.Records) > 0 {
+ select {
+ case notificationInfoCh <- notificationInfo:
+ case <-doneCh:
+ return
+ }
+ }
+ }
+ // Look for any underlying errors.
+ if err = bio.Err(); err != nil {
+ // For an unexpected connection drop from server, we close the body
+ // and re-connect.
+ if err == io.ErrUnexpectedEOF {
+ resp.Body.Close()
+ continue
+ }
+ notificationInfoCh <- NotificationInfo{
+ Err: err,
+ }
+ return
+ }
+ }
+ }(notificationInfoCh)
+
+ // Returns the notification info channel, for caller to start reading from.
+ return notificationInfoCh
+}
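A minimal consumer sketch for the new streaming API, continuing from the README's `minioClient`; bucket, prefix, and suffix are hypothetical, and the event names are standard S3 event types:

```go
// A done channel stops the listener when we return.
doneCh := make(chan struct{})
defer close(doneCh)

// Listen for object create/remove events under "photos/" ending in ".jpg".
for info := range minioClient.ListenBucketNotification("mybucket", "photos/", ".jpg",
	[]string{"s3:ObjectCreated:*", "s3:ObjectRemoved:*"}, doneCh) {
	if info.Err != nil {
		log.Fatalln(info.Err)
	}
	for _, event := range info.Records {
		log.Println("event:", event.EventName, "key:", event.S3.Object.Key)
	}
}
```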
diff --git a/api-put-bucket.go b/api-put-bucket.go
index e8d8217..3c9f438 100644
--- a/api-put-bucket.go
+++ b/api-put-bucket.go
@@ -26,7 +26,8 @@ import (
"io/ioutil"
"net/http"
"net/url"
- "reflect"
+
+ "github.com/minio/minio-go/pkg/policy"
)
/// Bucket operations
@@ -149,7 +150,7 @@ func (c Client) makeBucketRequest(bucketName string, location string) (*http.Req
// readonly - anonymous get access for everyone at a given object prefix.
// readwrite - anonymous list/put/delete access to a given object prefix.
// writeonly - anonymous put/delete access to a given object prefix.
-func (c Client) SetBucketPolicy(bucketName string, objectPrefix string, bucketPolicy BucketPolicy) error {
+func (c Client) SetBucketPolicy(bucketName string, objectPrefix string, bucketPolicy policy.BucketPolicy) error {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return err
@@ -157,57 +158,35 @@ func (c Client) SetBucketPolicy(bucketName string, objectPrefix string, bucketPo
if err := isValidObjectPrefix(objectPrefix); err != nil {
return err
}
- if !bucketPolicy.isValidBucketPolicy() {
+ if !bucketPolicy.IsValidBucketPolicy() {
return ErrInvalidArgument(fmt.Sprintf("Invalid bucket policy provided. %s", bucketPolicy))
}
- policy, err := c.getBucketPolicy(bucketName, objectPrefix)
+ policyInfo, err := c.getBucketPolicy(bucketName, objectPrefix)
if err != nil {
return err
}
- // For bucket policy set to 'none' we need to remove the policy.
- if bucketPolicy == BucketPolicyNone && policy.Statements == nil {
- // No policy exists on the given prefix so return with ErrNoSuchBucketPolicy.
- return ErrNoSuchBucketPolicy(fmt.Sprintf("No policy exists on %s/%s", bucketName, objectPrefix))
- }
- // Remove any previous policies at this path.
- statements := removeBucketPolicyStatement(policy.Statements, bucketName, objectPrefix)
- // generating []Statement for the given bucketPolicy.
- generatedStatements, err := generatePolicyStatement(bucketPolicy, bucketName, objectPrefix)
- if err != nil {
- return err
- }
- statements = append(statements, generatedStatements...)
-
- // No change in the statements indicates either an attempt of setting 'none'
- // on a prefix which doesn't have a pre-existing policy, or setting a policy
- // on a prefix which already has the same policy.
- if reflect.DeepEqual(policy.Statements, statements) {
- // If policy being set is 'none' return an error, otherwise return nil to
- // prevent the unnecessary request from being sent
- var err error
- if bucketPolicy == BucketPolicyNone {
- err = ErrNoSuchBucketPolicy(fmt.Sprintf("No policy exists on %s/%s", bucketName, objectPrefix))
- } else {
- err = nil
- }
- return err
+ if bucketPolicy == policy.BucketPolicyNone && policyInfo.Statements == nil {
+ // As the request is for removing policy and the bucket
+ // has empty policy statements, just return success.
+ return nil
}
- policy.Statements = statements
+ policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, bucketPolicy, bucketName, objectPrefix)
+
// Save the updated policies.
- return c.putBucketPolicy(bucketName, policy)
+ return c.putBucketPolicy(bucketName, policyInfo)
}
// Saves a new bucket policy.
-func (c Client) putBucketPolicy(bucketName string, policy BucketAccessPolicy) error {
+func (c Client) putBucketPolicy(bucketName string, policyInfo policy.BucketAccessPolicy) error {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return err
}
// If there are no policy statements, we should remove entire policy.
- if len(policy.Statements) == 0 {
+ if len(policyInfo.Statements) == 0 {
return c.removeBucketPolicy(bucketName)
}
@@ -216,7 +195,7 @@ func (c Client) putBucketPolicy(bucketName string, policy BucketAccessPolicy) er
urlValues := make(url.Values)
urlValues.Set("policy", "")
- policyBytes, err := json.Marshal(&policy)
+ policyBytes, err := json.Marshal(&policyInfo)
if err != nil {
return err
}
@@ -267,3 +246,49 @@ func (c Client) removeBucketPolicy(bucketName string) error {
}
return nil
}
+
+// SetBucketNotification saves a new bucket notification.
+func (c Client) SetBucketNotification(bucketName string, bucketNotification BucketNotification) error {
+ // Input validation.
+ if err := isValidBucketName(bucketName); err != nil {
+ return err
+ }
+
+ // Get resources properly escaped and lined up before
+ // using them in http request.
+ urlValues := make(url.Values)
+ urlValues.Set("notification", "")
+
+ notifBytes, err := xml.Marshal(bucketNotification)
+ if err != nil {
+ return err
+ }
+
+ notifBuffer := bytes.NewReader(notifBytes)
+ reqMetadata := requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentBody: notifBuffer,
+ contentLength: int64(len(notifBytes)),
+ contentMD5Bytes: sumMD5(notifBytes),
+ contentSHA256Bytes: sum256(notifBytes),
+ }
+
+ // Execute PUT to upload a new bucket notification.
+ resp, err := c.executeMethod("PUT", reqMetadata)
+ defer closeResponse(resp)
+ if err != nil {
+ return err
+ }
+ if resp != nil {
+ if resp.StatusCode != http.StatusOK {
+ return httpRespToErrorResponse(resp, bucketName, "")
+ }
+ }
+ return nil
+}
+
+// RemoveAllBucketNotification - Removes all previously configured bucket notifications.
+func (c Client) RemoveAllBucketNotification(bucketName string) error {
+ return c.SetBucketNotification(bucketName, BucketNotification{})
+}
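
The rewritten SetBucketPolicy delegates statement handling to the new pkg/policy package and, per the hunk above, treats setting policy.BucketPolicyNone on an already-empty policy as a no-op rather than an error. A minimal usage sketch, assuming a reachable endpoint, valid credentials, and that pkg/policy exports a BucketPolicyReadOnly constant alongside the BucketPolicyNone used above:

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
	"github.com/minio/minio-go/pkg/policy"
)

func main() {
	c, err := minio.New("s3.amazonaws.com", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}
	// Grant anonymous read access under the "public" prefix.
	if err := c.SetBucketPolicy("my-bucket", "public", policy.BucketPolicyReadOnly); err != nil {
		log.Fatalln(err)
	}
	// With the change above, clearing a prefix that has no policy
	// succeeds silently instead of returning ErrNoSuchBucketPolicy.
	if err := c.SetBucketPolicy("my-bucket", "missing", policy.BucketPolicyNone); err != nil {
		log.Fatalln(err)
	}
}
```
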
diff --git a/api-put-object-copy.go b/api-put-object-copy.go
index 45d5693..c7cd46d 100644
--- a/api-put-object-copy.go
+++ b/api-put-object-copy.go
@@ -38,7 +38,7 @@ func (c Client) CopyObject(bucketName string, objectName string, objectSource st
}
// Set copy source.
- customHeaders.Set("x-amz-copy-source", objectSource)
+ customHeaders.Set("x-amz-copy-source", urlEncodePath(objectSource))
// Execute PUT on objectName.
resp, err := c.executeMethod("PUT", requestMetadata{
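
CopyObject now URL-encodes the x-amz-copy-source header, which matters for object names containing spaces or other reserved characters. urlEncodePath is internal to the library; the sketch below only approximates its behavior with the standard library (encodeCopySource is a hypothetical helper):

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// encodeCopySource escapes each path segment of a copy source such as
// "bucket/object name.txt" while keeping the "/" separators intact,
// roughly what urlEncodePath does for the x-amz-copy-source header.
func encodeCopySource(src string) string {
	segments := strings.Split(src, "/")
	for i, s := range segments {
		segments[i] = url.PathEscape(s)
	}
	return strings.Join(segments, "/")
}

func main() {
	fmt.Println(encodeCopySource("my-bucket/dir/file name+v2.txt"))
	// Output: my-bucket/dir/file%20name+v2.txt
}
```
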
diff --git a/api-put-object-file.go b/api-put-object-file.go
index 2662bd6..deaed0a 100644
--- a/api-put-object-file.go
+++ b/api-put-object-file.go
@@ -151,7 +151,7 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
var totalUploadedSize int64
// Complete multipart upload.
- var completeMultipartUpload completeMultipartUpload
+ var complMultipartUpload completeMultipartUpload
// A map of all uploaded parts.
var partsInfo = make(map[int]objectPart)
@@ -167,90 +167,137 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
}
// Calculate the optimal parts info for a given size.
- totalPartsCount, partSize, _, err := optimalPartInfo(fileSize)
+ totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(fileSize)
if err != nil {
return 0, err
}
- // Part number always starts with '1'.
- partNumber := 1
+	// Create a channel to communicate that a part was uploaded.
+ // Buffer this to 10000, the maximum number of parts allowed by S3.
+ uploadedPartsCh := make(chan uploadedPartRes, 10000)
- for partNumber <= totalPartsCount {
- // Get a section reader on a particular offset.
- sectionReader := io.NewSectionReader(fileReader, totalUploadedSize, partSize)
+ // Create a channel to communicate which part to upload.
+ // Buffer this to 10000, the maximum number of parts allowed by S3.
+ uploadPartsCh := make(chan int, 10000)
- // Add hash algorithms that need to be calculated by computeHash()
- // In case of a non-v4 signature or https connection, sha256 is not needed.
- hashAlgos := make(map[string]hash.Hash)
- hashSums := make(map[string][]byte)
- hashAlgos["md5"] = md5.New()
- if c.signature.isV4() && !c.secure {
- hashAlgos["sha256"] = sha256.New()
- }
+ // Just for readability.
+ lastPartNumber := totalPartsCount
- var prtSize int64
- prtSize, err = computeHash(hashAlgos, hashSums, sectionReader)
- if err != nil {
- return 0, err
- }
+ // Send each part through the partUploadCh to be uploaded.
+ for p := 1; p <= totalPartsCount; p++ {
+ uploadPartsCh <- p
+ }
+ close(uploadPartsCh)
- var reader io.Reader
- // Update progress reader appropriately to the latest offset
- // as we read from the source.
- reader = newHook(sectionReader, progress)
-
- // Verify if part should be uploaded.
- if shouldUploadPart(objectPart{
- ETag: hex.EncodeToString(hashSums["md5"]),
- PartNumber: partNumber,
- Size: prtSize,
- }, partsInfo) {
- // Proceed to upload the part.
- var objPart objectPart
- objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber,
- hashSums["md5"], hashSums["sha256"], prtSize)
- if err != nil {
- return totalUploadedSize, err
- }
- // Save successfully uploaded part metadata.
- partsInfo[partNumber] = objPart
- } else {
- // Update the progress reader for the skipped part.
- if progress != nil {
- if _, err = io.CopyN(ioutil.Discard, progress, prtSize); err != nil {
- return totalUploadedSize, err
+ // Use three 'workers' to upload parts in parallel.
+ for w := 1; w <= 3; w++ {
+ go func() {
+ // Deal with each part as it comes through the channel.
+ for partNumber := range uploadPartsCh {
+ // Add hash algorithms that need to be calculated by computeHash()
+ // In case of a non-v4 signature or https connection, sha256 is not needed.
+ hashAlgos := make(map[string]hash.Hash)
+ hashSums := make(map[string][]byte)
+ hashAlgos["md5"] = md5.New()
+ if c.signature.isV4() && !c.secure {
+ hashAlgos["sha256"] = sha256.New()
}
- }
- }
- // Save successfully uploaded size.
- totalUploadedSize += prtSize
+ // Create the part to be uploaded.
+ verifyObjPart := objectPart{
+ ETag: hex.EncodeToString(hashSums["md5"]),
+ PartNumber: partNumber,
+ Size: partSize,
+ }
+ // If this is the last part do not give it the full part size.
+ if partNumber == lastPartNumber {
+ verifyObjPart.Size = lastPartSize
+ }
- // Increment part number.
- partNumber++
- }
+ // Verify if part should be uploaded.
+ if shouldUploadPart(verifyObjPart, partsInfo) {
+ // If partNumber was not uploaded we calculate the missing
+ // part offset and size. For all other part numbers we
+ // calculate offset based on multiples of partSize.
+ readOffset := int64(partNumber-1) * partSize
+ missingPartSize := partSize
- // Verify if we uploaded all data.
- if totalUploadedSize != fileSize {
- return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, fileSize, bucketName, objectName)
+ // As a special case if partNumber is lastPartNumber, we
+ // calculate the offset based on the last part size.
+ if partNumber == lastPartNumber {
+ readOffset = (fileSize - lastPartSize)
+ missingPartSize = lastPartSize
+ }
+
+ // Get a section reader on a particular offset.
+ sectionReader := io.NewSectionReader(fileReader, readOffset, missingPartSize)
+ var prtSize int64
+ prtSize, err = computeHash(hashAlgos, hashSums, sectionReader)
+ if err != nil {
+ uploadedPartsCh <- uploadedPartRes{
+ Error: err,
+ }
+ // Exit the goroutine.
+ return
+ }
+
+ // Proceed to upload the part.
+ var objPart objectPart
+ objPart, err = c.uploadPart(bucketName, objectName, uploadID, sectionReader, partNumber, hashSums["md5"], hashSums["sha256"], prtSize)
+ if err != nil {
+ uploadedPartsCh <- uploadedPartRes{
+ Error: err,
+ }
+ // Exit the goroutine.
+ return
+ }
+ // Save successfully uploaded part metadata.
+ partsInfo[partNumber] = objPart
+ }
+				// Return the part size through the channel.
+ uploadedPartsCh <- uploadedPartRes{
+ Size: verifyObjPart.Size,
+ PartNum: partNumber,
+ Error: nil,
+ }
+ }
+ }()
}
- // Loop over uploaded parts to save them in a Parts array before completing the multipart request.
- for _, part := range partsInfo {
- var complPart completePart
- complPart.ETag = part.ETag
- complPart.PartNumber = part.PartNumber
- completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, complPart)
+ // Retrieve each uploaded part once it is done.
+ for u := 1; u <= totalPartsCount; u++ {
+ uploadRes := <-uploadedPartsCh
+ if uploadRes.Error != nil {
+ return totalUploadedSize, uploadRes.Error
+ }
+ // Retrieve each uploaded part and store it to be completed.
+ part, ok := partsInfo[uploadRes.PartNum]
+ if !ok {
+ return totalUploadedSize, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum))
+ }
+ // Update the total uploaded size.
+ totalUploadedSize += uploadRes.Size
+ // Update the progress bar if there is one.
+ if progress != nil {
+ if _, err = io.CopyN(ioutil.Discard, progress, uploadRes.Size); err != nil {
+ return totalUploadedSize, err
+ }
+ }
+ // Store the part to be completed.
+ complMultipartUpload.Parts = append(complMultipartUpload.Parts, completePart{
+ ETag: part.ETag,
+ PartNumber: part.PartNumber,
+ })
}
- // Verify if totalPartsCount is not equal to total list of parts.
- if totalPartsCount != len(completeMultipartUpload.Parts) {
- return totalUploadedSize, ErrInvalidParts(partNumber, len(completeMultipartUpload.Parts))
+ // Verify if we uploaded all data.
+ if totalUploadedSize != fileSize {
+ return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, fileSize, bucketName, objectName)
}
// Sort all completed parts.
- sort.Sort(completedParts(completeMultipartUpload.Parts))
- _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, completeMultipartUpload)
+ sort.Sort(completedParts(complMultipartUpload.Parts))
+ _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
if err != nil {
return totalUploadedSize, err
}
diff --git a/api-put-object-multipart.go b/api-put-object-multipart.go
index c8332d8..cdd3f53 100644
--- a/api-put-object-multipart.go
+++ b/api-put-object-multipart.go
@@ -22,6 +22,7 @@ import (
"crypto/sha256"
"encoding/hex"
"encoding/xml"
+ "fmt"
"hash"
"io"
"io/ioutil"
@@ -93,7 +94,8 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
}
	// If this session is a continuation of a previous session, fetch all
- // previously uploaded parts info.
+	// previously uploaded parts info and, as a special case, fetch partsInfo
+	// only when the upload size is known.
if !isNew {
// Fetch previously uploaded parts and maximum part size.
partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID)
@@ -115,7 +117,6 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
tmpBuffer := new(bytes.Buffer)
for partNumber <= totalPartsCount {
-
// Choose hash algorithms to be calculated by hashCopyN, avoid sha256
// with non-v4 signature request or HTTPS connection
hashSums := make(map[string][]byte)
@@ -169,14 +170,14 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
// Save successfully uploaded size.
totalUploadedSize += prtSize
+ // Increment part number.
+ partNumber++
+
// For unknown size, Read EOF we break away.
// We do not have to upload till totalPartsCount.
if size < 0 && rErr == io.EOF {
break
}
-
- // Increment part number.
- partNumber++
}
// Verify if we uploaded all the data.
@@ -186,19 +187,17 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
}
}
- // Loop over uploaded parts to save them in a Parts array before completing the multipart request.
- for _, part := range partsInfo {
- var complPart completePart
- complPart.ETag = part.ETag
- complPart.PartNumber = part.PartNumber
- complMultipartUpload.Parts = append(complMultipartUpload.Parts, complPart)
- }
-
- if size > 0 {
- // Verify if totalPartsCount is not equal to total list of parts.
- if totalPartsCount != len(complMultipartUpload.Parts) {
- return totalUploadedSize, ErrInvalidParts(partNumber, len(complMultipartUpload.Parts))
+ // Loop over total uploaded parts to save them in
+ // Parts array before completing the multipart request.
+ for i := 1; i < partNumber; i++ {
+ part, ok := partsInfo[i]
+ if !ok {
+ return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i))
}
+ complMultipartUpload.Parts = append(complMultipartUpload.Parts, completePart{
+ ETag: part.ETag,
+ PartNumber: part.PartNumber,
+ })
}
// Sort all completed parts.
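
Looping over part numbers 1..partNumber-1 instead of ranging over the partsInfo map matters for resumed uploads: the map fetched on resume can contain more parts than this session actually uploaded, and a missing expected part now fails loudly instead of being silently skipped. A small sketch of the assembly step, using simplified stand-in types:

```go
package main

import (
	"fmt"
	"sort"
)

type completePart struct {
	ETag       string
	PartNumber int
}

func main() {
	partsInfo := map[int]completePart{
		1: {"etag-1", 1}, 2: {"etag-2", 2}, 3: {"etag-3", 3},
	}
	uploadedThrough := 3 // plays the role of partNumber-1 above

	var parts []completePart
	for i := 1; i <= uploadedThrough; i++ {
		p, ok := partsInfo[i]
		if !ok {
			fmt.Printf("missing part number %d\n", i)
			return
		}
		parts = append(parts, p)
	}
	// Ranging over the map would visit parts in random order and could
	// include stale entries left over from an earlier resumed session.
	sort.Slice(parts, func(a, b int) bool { return parts[a].PartNumber < parts[b].PartNumber })
	fmt.Println(parts)
}
```
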
diff --git a/api-put-object-progress.go b/api-put-object-progress.go
index ebbc380..0f79e70 100644
--- a/api-put-object-progress.go
+++ b/api-put-object-progress.go
@@ -16,7 +16,10 @@
package minio
-import "io"
+import (
+ "io"
+ "strings"
+)
// PutObjectWithProgress - With progress.
func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.Reader, contentType string, progress io.Reader) (n int64, err error) {
@@ -91,7 +94,7 @@ func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.R
errResp := ToErrorResponse(err)
// Verify if multipart functionality is not available, if not
// fall back to single PutObject operation.
- if errResp.Code == "AccessDenied" && errResp.Message == "Access Denied." {
+ if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
// Verify if size of reader is greater than '5GiB'.
if size > maxSinglePutObjectSize {
return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
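
The exact match on "Access Denied." was brittle: S3-compatible servers phrase the message with varying punctuation and surrounding text, so the single-PutObject fallback never triggered. The relaxed check only requires the substring. A tiny illustration (the sample messages are hypothetical):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, msg := range []string{"Access Denied.", "Access Denied", "Request failed: Access Denied"} {
		// Substring match succeeds for all variants; the old equality
		// check matched only the first.
		fallback := strings.Contains(msg, "Access Denied")
		fmt.Printf("%-35q fallback=%v\n", msg, fallback)
	}
}
```
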
diff --git a/api-put-object-readat.go b/api-put-object-readat.go
index cd607d5..14fa4b2 100644
--- a/api-put-object-readat.go
+++ b/api-put-object-readat.go
@@ -20,12 +20,20 @@ import (
"bytes"
"crypto/md5"
"crypto/sha256"
+ "fmt"
"hash"
"io"
"io/ioutil"
"sort"
)
+// uploadedPartRes - the response received from a part upload.
+type uploadedPartRes struct {
+ Error error // Any error encountered while uploading the part.
+ PartNum int // Number of the part uploaded.
+ Size int64 // Size of the part uploaded.
+}
+
// shouldUploadPartReadAt - verify if part should be uploaded.
func shouldUploadPartReadAt(objPart objectPart, objectParts map[int]objectPart) bool {
// If part not found part should be uploaded.
@@ -89,110 +97,136 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
return 0, err
}
- // Used for readability, lastPartNumber is always
- // totalPartsCount.
+ // Used for readability, lastPartNumber is always totalPartsCount.
lastPartNumber := totalPartsCount
- // partNumber always starts with '1'.
- partNumber := 1
+ // Declare a channel that sends the next part number to be uploaded.
+ // Buffered to 10000 because thats the maximum number of parts allowed
+	// Buffered to 10000 because that's the maximum number of parts allowed
+ uploadPartsCh := make(chan int, 10000)
- // Initialize a temporary buffer.
- tmpBuffer := new(bytes.Buffer)
+ // Declare a channel that sends back the response of a part upload.
+	// Buffered to 10000 because that's the maximum number of parts allowed
+ // by S3.
+ uploadedPartsCh := make(chan uploadedPartRes, 10000)
- // Read defaults to reading at 5MiB buffer.
- readAtBuffer := make([]byte, optimalReadBufferSize)
-
- // Upload all the missing parts.
- for partNumber <= lastPartNumber {
- // Verify object if its uploaded.
- verifyObjPart := objectPart{
- PartNumber: partNumber,
- Size: partSize,
- }
- // Special case if we see a last part number, save last part
- // size as the proper part size.
- if partNumber == lastPartNumber {
- verifyObjPart = objectPart{
- PartNumber: lastPartNumber,
- Size: lastPartSize,
- }
- }
+ // Send each part number to the channel to be processed.
+ for p := 1; p <= totalPartsCount; p++ {
+ uploadPartsCh <- p
+ }
+ close(uploadPartsCh)
+
+	// Spawn three workers that receive part numbers from the channel and upload in parallel.
+ for w := 1; w <= 3; w++ {
+ go func() {
+ // Read defaults to reading at 5MiB buffer.
+ readAtBuffer := make([]byte, optimalReadBufferSize)
+
+ // Each worker will draw from the part channel and upload in parallel.
+ for partNumber := range uploadPartsCh {
+ // Declare a new tmpBuffer.
+ tmpBuffer := new(bytes.Buffer)
+
+				// Verify if this object part was already uploaded.
+ verifyObjPart := objectPart{
+ PartNumber: partNumber,
+ Size: partSize,
+ }
+				// Special case: if this is the last part number, use the
+				// last part size as the proper part size.
+ if partNumber == lastPartNumber {
+ verifyObjPart.Size = lastPartSize
+ }
- // Verify if part should be uploaded.
- if !shouldUploadPartReadAt(verifyObjPart, partsInfo) {
- // Increment part number when not uploaded.
- partNumber++
- if progress != nil {
- // Update the progress reader for the skipped part.
- if _, err = io.CopyN(ioutil.Discard, progress, verifyObjPart.Size); err != nil {
- return 0, err
+ // Only upload the necessary parts. Otherwise return size through channel
+ // to update any progress bar.
+ if shouldUploadPartReadAt(verifyObjPart, partsInfo) {
+ // If partNumber was not uploaded we calculate the missing
+ // part offset and size. For all other part numbers we
+ // calculate offset based on multiples of partSize.
+ readOffset := int64(partNumber-1) * partSize
+ missingPartSize := partSize
+
+ // As a special case if partNumber is lastPartNumber, we
+ // calculate the offset based on the last part size.
+ if partNumber == lastPartNumber {
+ readOffset = (size - lastPartSize)
+ missingPartSize = lastPartSize
+ }
+
+ // Get a section reader on a particular offset.
+ sectionReader := io.NewSectionReader(reader, readOffset, missingPartSize)
+
+ // Choose the needed hash algorithms to be calculated by hashCopyBuffer.
+ // Sha256 is avoided in non-v4 signature requests or HTTPS connections
+ hashSums := make(map[string][]byte)
+ hashAlgos := make(map[string]hash.Hash)
+ hashAlgos["md5"] = md5.New()
+ if c.signature.isV4() && !c.secure {
+ hashAlgos["sha256"] = sha256.New()
+ }
+
+ var prtSize int64
+ prtSize, err = hashCopyBuffer(hashAlgos, hashSums, tmpBuffer, sectionReader, readAtBuffer)
+ if err != nil {
+ // Send the error back through the channel.
+ uploadedPartsCh <- uploadedPartRes{
+ Size: 0,
+ Error: err,
+ }
+ // Exit the goroutine.
+ return
+ }
+
+ // Proceed to upload the part.
+ var objPart objectPart
+ objPart, err = c.uploadPart(bucketName, objectName, uploadID, tmpBuffer, partNumber, hashSums["md5"], hashSums["sha256"], prtSize)
+ if err != nil {
+ uploadedPartsCh <- uploadedPartRes{
+ Size: 0,
+ Error: err,
+ }
+ // Exit the goroutine.
+ return
+ }
+ // Save successfully uploaded part metadata.
+ partsInfo[partNumber] = objPart
+ }
+ // Send successful part info through the channel.
+ uploadedPartsCh <- uploadedPartRes{
+ Size: verifyObjPart.Size,
+ PartNum: partNumber,
+ Error: nil,
}
}
- continue
- }
-
- // If partNumber was not uploaded we calculate the missing
- // part offset and size. For all other part numbers we
- // calculate offset based on multiples of partSize.
- readOffset := int64(partNumber-1) * partSize
- missingPartSize := partSize
-
- // As a special case if partNumber is lastPartNumber, we
- // calculate the offset based on the last part size.
- if partNumber == lastPartNumber {
- readOffset = (size - lastPartSize)
- missingPartSize = lastPartSize
- }
-
- // Get a section reader on a particular offset.
- sectionReader := io.NewSectionReader(reader, readOffset, missingPartSize)
+ }()
+ }
- // Choose the needed hash algorithms to be calculated by hashCopyBuffer.
- // Sha256 is avoided in non-v4 signature requests or HTTPS connections
- hashSums := make(map[string][]byte)
- hashAlgos := make(map[string]hash.Hash)
- hashAlgos["md5"] = md5.New()
- if c.signature.isV4() && !c.secure {
- hashAlgos["sha256"] = sha256.New()
+ // Gather the responses as they occur and update any
+ // progress bar.
+ for u := 1; u <= totalPartsCount; u++ {
+ uploadRes := <-uploadedPartsCh
+ if uploadRes.Error != nil {
+ return totalUploadedSize, uploadRes.Error
}
-
- var prtSize int64
- prtSize, err = hashCopyBuffer(hashAlgos, hashSums, tmpBuffer, sectionReader, readAtBuffer)
- if err != nil {
- return 0, err
+ // Retrieve each uploaded part and store it to be completed.
+ part, ok := partsInfo[uploadRes.PartNum]
+ if !ok {
+ return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum))
}
-
- var reader io.Reader
- // Update progress reader appropriately to the latest offset
- // as we read from the source.
- reader = newHook(tmpBuffer, progress)
-
- // Proceed to upload the part.
- var objPart objectPart
- objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, hashSums["md5"], hashSums["sha256"], prtSize)
- if err != nil {
- // Reset the buffer upon any error.
- tmpBuffer.Reset()
- return 0, err
+ // Update the totalUploadedSize.
+ totalUploadedSize += uploadRes.Size
+ // Update the progress bar if there is one.
+ if progress != nil {
+ if _, err = io.CopyN(ioutil.Discard, progress, uploadRes.Size); err != nil {
+ return totalUploadedSize, err
+ }
}
-
- // Save successfully uploaded part metadata.
- partsInfo[partNumber] = objPart
-
- // Increment part number here after successful part upload.
- partNumber++
-
- // Reset the buffer.
- tmpBuffer.Reset()
- }
-
- // Loop over uploaded parts to save them in a Parts array before completing the multipart request.
- for _, part := range partsInfo {
- var complPart completePart
- complPart.ETag = part.ETag
- complPart.PartNumber = part.PartNumber
- totalUploadedSize += part.Size
- complMultipartUpload.Parts = append(complMultipartUpload.Parts, complPart)
+ // Store the parts to be completed in order.
+ complMultipartUpload.Parts = append(complMultipartUpload.Parts, completePart{
+ ETag: part.ETag,
+ PartNumber: part.PartNumber,
+ })
}
// Verify if we uploaded all the data.
@@ -200,11 +234,6 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
}
- // Verify if totalPartsCount is not equal to total list of parts.
- if totalPartsCount != len(complMultipartUpload.Parts) {
- return totalUploadedSize, ErrInvalidParts(totalPartsCount, len(complMultipartUpload.Parts))
- }
-
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
_, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
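
Because each worker derives a part's byte range purely from its part number, parts can be read and uploaded out of order. A sketch of the offset arithmetic, including the shorter last part (partRange is a hypothetical helper mirroring the readOffset/missingPartSize calculation above):

```go
package main

import "fmt"

// partRange returns the read offset and length for 1-based part n.
func partRange(size, partSize, lastPartSize int64, n, lastPart int) (offset, length int64) {
	if n == lastPart {
		return size - lastPartSize, lastPartSize
	}
	return int64(n-1) * partSize, partSize
}

func main() {
	size := int64(11 * 1024 * 1024)    // 11 MiB object
	partSize := int64(5 * 1024 * 1024) // 5 MiB parts
	lastPartSize := size - 2*partSize  // 1 MiB remainder
	for n := 1; n <= 3; n++ {
		off, ln := partRange(size, partSize, lastPartSize, n, 3)
		fmt.Printf("part %d: offset=%d length=%d\n", n, off, ln)
	}
}
```
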
diff --git a/api-put-object.go b/api-put-object.go
index ba846f9..f7dd2da 100644
--- a/api-put-object.go
+++ b/api-put-object.go
@@ -221,6 +221,9 @@ func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader,
}
defer tmpFile.Close()
size, err = hashCopyN(hashAlgos, hashSums, tmpFile, reader, size)
+ if err != nil {
+ return 0, err
+ }
// Seek back to beginning of the temporary file.
if _, err = tmpFile.Seek(0, 0); err != nil {
return 0, err
diff --git a/api-remove.go b/api-remove.go
index 46d47ab..110a73e 100644
--- a/api-remove.go
+++ b/api-remove.go
@@ -17,6 +17,9 @@
package minio
import (
+ "bytes"
+ "encoding/xml"
+ "io"
"net/http"
"net/url"
)
@@ -74,6 +77,125 @@ func (c Client) RemoveObject(bucketName, objectName string) error {
return nil
}
+// RemoveObjectError - container for a Multi-Object Delete S3 API error.
+type RemoveObjectError struct {
+ ObjectName string
+ Err error
+}
+
+// generateRemoveMultiObjectsRequest - generate the XML request for the Multi-Object Delete API.
+func generateRemoveMultiObjectsRequest(objects []string) []byte {
+ rmObjects := []deleteObject{}
+ for _, obj := range objects {
+ rmObjects = append(rmObjects, deleteObject{Key: obj})
+ }
+ xmlBytes, _ := xml.Marshal(deleteMultiObjects{Objects: rmObjects, Quiet: true})
+ return xmlBytes
+}
+
+// processRemoveMultiObjectsResponse - parse the Multi-Object Delete response
+// and send the failure status of each undeleted object to errorCh.
+func processRemoveMultiObjectsResponse(body io.Reader, objects []string, errorCh chan<- RemoveObjectError) {
+ // Parse multi delete XML response
+ rmResult := &deleteMultiObjectsResult{}
+ err := xmlDecoder(body, rmResult)
+ if err != nil {
+ errorCh <- RemoveObjectError{ObjectName: "", Err: err}
+ return
+ }
+
+	// Report each deletion that returned an error.
+ for _, obj := range rmResult.UnDeletedObjects {
+ errorCh <- RemoveObjectError{
+ ObjectName: obj.Key,
+ Err: ErrorResponse{
+ Code: obj.Code,
+ Message: obj.Message,
+ },
+ }
+ }
+}
+
+// RemoveObjects removes multiple objects from a bucket.
+// The list of objects to remove is received from objectsCh.
+// Remove failures are sent back via the error channel.
+func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan RemoveObjectError {
+ errorCh := make(chan RemoveObjectError, 1)
+
+ // Validate if bucket name is valid.
+ if err := isValidBucketName(bucketName); err != nil {
+ defer close(errorCh)
+ errorCh <- RemoveObjectError{
+ Err: err,
+ }
+ return errorCh
+ }
+ // Validate objects channel to be properly allocated.
+ if objectsCh == nil {
+ defer close(errorCh)
+ errorCh <- RemoveObjectError{
+ Err: ErrInvalidArgument("Objects channel cannot be nil"),
+ }
+ return errorCh
+ }
+
+ // Generate and call MultiDelete S3 requests based on entries received from objectsCh
+ go func(errorCh chan<- RemoveObjectError) {
+ maxEntries := 1000
+ finish := false
+ urlValues := make(url.Values)
+ urlValues.Set("delete", "")
+
+ // Close error channel when Multi delete finishes.
+ defer close(errorCh)
+
+ // Loop over entries by 1000 and call MultiDelete requests
+ for {
+ if finish {
+ break
+ }
+ count := 0
+ var batch []string
+
+ // Try to gather 1000 entries
+ for object := range objectsCh {
+ batch = append(batch, object)
+ if count++; count >= maxEntries {
+ break
+ }
+ }
+ if count < maxEntries {
+ // We didn't have 1000 entries, so this is the last batch
+ finish = true
+ }
+
+ // Generate remove multi objects XML request
+ removeBytes := generateRemoveMultiObjectsRequest(batch)
+		// Execute POST on bucket to remove the batch of objects.
+ resp, err := c.executeMethod("POST", requestMetadata{
+ bucketName: bucketName,
+ queryValues: urlValues,
+ contentBody: bytes.NewReader(removeBytes),
+ contentLength: int64(len(removeBytes)),
+ contentMD5Bytes: sumMD5(removeBytes),
+ contentSHA256Bytes: sum256(removeBytes),
+ })
+ if err != nil {
+ for _, b := range batch {
+ errorCh <- RemoveObjectError{ObjectName: b, Err: err}
+ }
+ continue
+ }
+
+ // Process multiobjects remove xml response
+ processRemoveMultiObjectsResponse(resp.Body, batch, errorCh)
+
+ closeResponse(resp)
+ }
+ }(errorCh)
+ return errorCh
+}
+
// RemoveIncompleteUpload aborts a partially uploaded object.
// Requires explicit authentication, no anonymous requests are allowed for multipart API.
func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {
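
A minimal sketch of driving the new RemoveObjects API, assuming valid credentials: object names are fed through objectsCh, and per-object failures are drained from the returned channel, which closes once all batches finish.

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	c, err := minio.New("s3.amazonaws.com", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Feed object names into the channel; RemoveObjects batches them
	// 1000 at a time into Multi-Object Delete requests.
	objectsCh := make(chan string)
	go func() {
		defer close(objectsCh)
		for _, name := range []string{"obj-1", "obj-2", "obj-3"} {
			objectsCh <- name
		}
	}()

	// Drain the error channel; it closes when all batches are done.
	for e := range c.RemoveObjects("my-bucket", objectsCh) {
		log.Println("failed to remove", e.ObjectName, ":", e.Err)
	}
}
```
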
diff --git a/api-s3-datatypes.go b/api-s3-datatypes.go
index a07bbcf..52e8a12 100644
--- a/api-s3-datatypes.go
+++ b/api-s3-datatypes.go
@@ -206,3 +206,38 @@ type createBucketConfiguration struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"`
Location string `xml:"LocationConstraint"`
}
+
+// deleteObject container for Delete element in MultiObjects Delete XML request
+type deleteObject struct {
+ Key string
+ VersionId string `xml:"VersionId,omitempty"`
+}
+
+// deletedObject container for Deleted element in MultiObjects Delete XML response
+type deletedObject struct {
+ Key string
+ VersionId string `xml:"VersionId,omitempty"`
+ DeleteMarker bool
+ DeleteMarkerVersionId string
+}
+
+// nonDeletedObject container for Error element (failed deletion) in MultiObjects Delete XML response
+type nonDeletedObject struct {
+ Key string
+ Code string
+ Message string
+}
+
+// deleteMultiObjects container for MultiObjects Delete XML request
+type deleteMultiObjects struct {
+ XMLName xml.Name `xml:"Delete"`
+ Quiet bool
+ Objects []deleteObject `xml:"Object"`
+}
+
+// deleteMultiObjectsResult container for MultiObjects Delete XML response
+type deleteMultiObjectsResult struct {
+ XMLName xml.Name `xml:"DeleteResult"`
+ DeletedObjects []deletedObject `xml:"Deleted"`
+ UnDeletedObjects []nonDeletedObject `xml:"Error"`
+}
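
The request body these types marshal to follows the S3 Multi-Object Delete schema. Since the types above are unexported, the sketch below uses exported mirrors purely to show the wire shape:

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// Exported mirrors of the unexported deleteObject/deleteMultiObjects
// types, for illustration only.
type DeleteObject struct {
	Key string
}

type DeleteMulti struct {
	XMLName xml.Name       `xml:"Delete"`
	Quiet   bool
	Objects []DeleteObject `xml:"Object"`
}

func main() {
	body, err := xml.MarshalIndent(DeleteMulti{
		Quiet:   true,
		Objects: []DeleteObject{{Key: "obj-1"}, {Key: "obj-2"}},
	}, "", "  ")
	if err != nil {
		panic(err)
	}
	// Prints the <Delete><Quiet>…<Object><Key>… document that the
	// POST ?delete request carries.
	fmt.Println(string(body))
}
```
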
diff --git a/api-stat.go b/api-stat.go
index b5db7fe..976d612 100644
--- a/api-stat.go
+++ b/api-stat.go
@@ -24,10 +24,10 @@ import (
)
// BucketExists verifies whether the bucket exists and you have permission to access it.
-func (c Client) BucketExists(bucketName string) error {
+func (c Client) BucketExists(bucketName string) (bool, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
- return err
+ return false, err
}
// Execute HEAD on bucketName.
@@ -36,14 +36,17 @@ func (c Client) BucketExists(bucketName string) error {
})
defer closeResponse(resp)
if err != nil {
- return err
+ if ToErrorResponse(err).Code == "NoSuchBucket" {
+ return false, nil
+ }
+ return false, err
}
if resp != nil {
if resp.StatusCode != http.StatusOK {
- return httpRespToErrorResponse(resp, bucketName, "")
+ return false, httpRespToErrorResponse(resp, bucketName, "")
}
}
- return nil
+ return true, nil
}
// StatObject verifies if the object exists and you have permission to access it.
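
BucketExists now reports non-existence as (false, nil) instead of surfacing NoSuchBucket as an error, so callers can branch on the boolean without inspecting error codes. A minimal usage sketch, assuming valid credentials:

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	c, err := minio.New("s3.amazonaws.com", "ACCESS-KEY", "SECRET-KEY", true)
	if err != nil {
		log.Fatalln(err)
	}
	// "NoSuchBucket" is no longer an error; it is simply exists == false.
	exists, err := c.BucketExists("my-bucket")
	if err != nil {
		log.Fatalln(err) // network failure, access denied, etc.
	}
	log.Println("exists:", exists)
}
```
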
diff --git a/api.go b/api.go
index 172e3c4..9549270 100644
--- a/api.go
+++ b/api.go
@@ -73,7 +73,7 @@ type Client struct {
// Global constants.
const (
libraryName = "minio-go"
- libraryVersion = "2.0.1"
+ libraryVersion = "2.0.2"
)
// User Agent should always following the below style.
@@ -589,7 +589,7 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
// set sha256 sum for signature calculation only with
// signature version '4'.
if c.signature.isV4() {
- shaHeader := "UNSIGNED-PAYLOAD"
+ shaHeader := unsignedPayload
if !c.secure {
if metadata.contentSHA256Bytes == nil {
shaHeader = hex.EncodeToString(sum256([]byte{}))
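
With signature v4, the x-amz-content-sha256 header now starts from the shared unsignedPayload constant (presumably defined in constants.go, which this change also touches) and, on plain-HTTP connections with no precomputed payload sum, falls back to the SHA-256 of an empty body. That empty-body digest is a fixed, well-known value:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	// SHA-256 of an empty body, the fallback x-amz-content-sha256 value
	// when no payload sum was precomputed on a plain-HTTP connection.
	sum := sha256.Sum256([]byte{})
	fmt.Println(hex.EncodeToString(sum[:]))
	// e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
}
```
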
diff --git a/api_functional_v2_test.go b/api_functional_v2_test.go
deleted file mode 100644
index 718d444..0000000
--- a/api_functional_v2_test.go
+++ /dev/null
@@ -1,1310 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio_test
-
-import (
- "bytes"
- crand "crypto/rand"
- "errors"
- "io"
- "io/ioutil"
- "math/rand"
- "net/http"
- "net/url"
- "os"
- "testing"
- "time"
-
- "github.com/minio/minio-go"
-)
-
-// Tests bucket re-create errors.
-func TestMakeBucketErrorV2(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.NewV2(
- "s3.amazonaws.com",
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- true,
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
-
- // Make a new bucket in 'eu-west-1'.
- if err = c.MakeBucket(bucketName, "eu-west-1"); err != nil {
- t.Fatal("Error:", err, bucketName)
- }
- if err = c.MakeBucket(bucketName, "eu-west-1"); err == nil {
- t.Fatal("Error: make bucket should should fail for", bucketName)
- }
- // Verify valid error response from server.
- if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" &&
- minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" {
- t.Fatal("Error: Invalid error returned by server", err)
- }
- if err = c.RemoveBucket(bucketName); err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-}
-
-// Test get object reader to not throw error on being closed twice.
-func TestGetObjectClosedTwiceV2(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.NewV2(
- "s3.amazonaws.com",
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- true,
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Generate data more than 32K
- buf := make([]byte, rand.Intn(1<<20)+32*1024)
-
- _, err = io.ReadFull(crand.Reader, buf)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()))
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
-
- if n != int64(len(buf)) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
- }
-
- // Read the data back
- r, err := c.GetObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
-
- st, err := r.Stat()
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
- if st.Size != int64(len(buf)) {
- t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
- len(buf), st.Size)
- }
- if err := r.Close(); err != nil {
- t.Fatal("Error:", err)
- }
- if err := r.Close(); err == nil {
- t.Fatal("Error: object is already closed, should return error")
- }
-
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-}
-
-// Tests removing partially uploaded objects.
-func TestRemovePartiallyUploadedV2(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping function tests for short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.NewV2(
- "s3.amazonaws.com",
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- true,
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Enable tracing, write to stdout.
- // c.TraceOn(os.Stderr)
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
-
- // make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- reader, writer := io.Pipe()
- go func() {
- i := 0
- for i < 25 {
- _, err = io.CopyN(writer, crand.Reader, 128*1024)
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
- i++
- }
- writer.CloseWithError(errors.New("Proactively closed to be verified later."))
- }()
-
- objectName := bucketName + "-resumable"
- _, err = c.PutObject(bucketName, objectName, reader, "application/octet-stream")
- if err == nil {
- t.Fatal("Error: PutObject should fail.")
- }
- if err.Error() != "Proactively closed to be verified later." {
- t.Fatal("Error:", err)
- }
- err = c.RemoveIncompleteUpload(bucketName, objectName)
- if err != nil {
- t.Fatal("Error:", err)
- }
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-}
-
-// Tests resumable put object cloud to cloud.
-func TestResumablePutObjectV2(t *testing.T) {
- // By passing 'go test -short' skips these tests.
- if testing.Short() {
- t.Skip("skipping functional tests for the short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.NewV2(
- "s3.amazonaws.com",
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- true,
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Enable tracing, write to stdout.
- // c.TraceOn(os.Stderr)
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Create a temporary file.
- file, err := ioutil.TempFile(os.TempDir(), "resumable")
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Copy 11MiB worth of random data.
- n, err := io.CopyN(file, crand.Reader, 11*1024*1024)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != int64(11*1024*1024) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
- }
-
- // Close the file pro-actively for windows.
- if err = file.Close(); err != nil {
- t.Fatal("Error:", err)
- }
-
- // New object name.
- objectName := bucketName + "-resumable"
-
- // Upload the file.
- n, err = c.FPutObject(bucketName, objectName, file.Name(), "application/octet-stream")
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != int64(11*1024*1024) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
- }
-
- // Get the uploaded object.
- reader, err := c.GetObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Upload now cloud to cloud.
- n, err = c.PutObject(bucketName, objectName+"-put", reader, "application/octest-stream")
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Get object info.
- objInfo, err := reader.Stat()
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != objInfo.Size {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", objInfo.Size, n)
- }
-
- // Remove all temp files, objects and bucket.
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- err = c.RemoveObject(bucketName, objectName+"-put")
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- err = os.Remove(file.Name())
- if err != nil {
- t.Fatal("Error:", err)
- }
-
-}
-
-// Tests FPutObject hidden contentType setting
-func TestFPutObjectV2(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.NewV2(
- "s3.amazonaws.com",
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- true,
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Make a temp file with 11*1024*1024 bytes of data.
- file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- n, err := io.CopyN(file, crand.Reader, 11*1024*1024)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != int64(11*1024*1024) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
- }
-
- // Close the file pro-actively for windows.
- err = file.Close()
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Set base object name
- objectName := bucketName + "FPutObject"
-
- // Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
- n, err = c.FPutObject(bucketName, objectName+"-standard", file.Name(), "application/octet-stream")
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != int64(11*1024*1024) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
- }
-
- // Perform FPutObject with no contentType provided (Expecting application/octet-stream)
- n, err = c.FPutObject(bucketName, objectName+"-Octet", file.Name(), "")
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != int64(11*1024*1024) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
- }
-
- // Add extension to temp file name
- fileName := file.Name()
- err = os.Rename(file.Name(), fileName+".gtar")
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Perform FPutObject with no contentType provided (Expecting application/x-gtar)
- n, err = c.FPutObject(bucketName, objectName+"-GTar", fileName+".gtar", "")
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != int64(11*1024*1024) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
- }
-
- // Check headers
- rStandard, err := c.StatObject(bucketName, objectName+"-standard")
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName+"-standard")
- }
- if rStandard.ContentType != "application/octet-stream" {
- t.Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
- "application/octet-stream", rStandard.ContentType)
- }
-
- rOctet, err := c.StatObject(bucketName, objectName+"-Octet")
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName+"-Octet")
- }
- if rOctet.ContentType != "application/octet-stream" {
- t.Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
- "application/octet-stream", rStandard.ContentType)
- }
-
- rGTar, err := c.StatObject(bucketName, objectName+"-GTar")
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName+"-GTar")
- }
- if rGTar.ContentType != "application/x-gtar" {
- t.Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
- "application/x-gtar", rStandard.ContentType)
- }
-
- // Remove all objects and bucket and temp file
- err = c.RemoveObject(bucketName, objectName+"-standard")
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- err = c.RemoveObject(bucketName, objectName+"-Octet")
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- err = c.RemoveObject(bucketName, objectName+"-GTar")
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- err = os.Remove(fileName + ".gtar")
- if err != nil {
- t.Fatal("Error:", err)
- }
-
-}
-
-// Tests resumable file based put object multipart upload.
-func TestResumableFPutObjectV2(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for the short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.NewV2(
- "s3.amazonaws.com",
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- true,
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Enable tracing, write to stdout.
- // c.TraceOn(os.Stderr)
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
-
- // make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- file, err := ioutil.TempFile(os.TempDir(), "resumable")
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- n, err := io.CopyN(file, crand.Reader, 11*1024*1024)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != int64(11*1024*1024) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
- }
-
- objectName := bucketName + "-resumable"
-
- n, err = c.FPutObject(bucketName, objectName, file.Name(), "application/octet-stream")
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != int64(11*1024*1024) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
- }
-
- // Close the file pro-actively for windows.
- file.Close()
-
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- err = os.Remove(file.Name())
- if err != nil {
- t.Fatal("Error:", err)
- }
-}
-
-// Tests various bucket supported formats.
-func TestMakeBucketRegionsV2(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.NewV2(
- "s3.amazonaws.com",
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- true,
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
-
- // Make a new bucket in 'eu-central-1'.
- if err = c.MakeBucket(bucketName, "eu-west-1"); err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- if err = c.RemoveBucket(bucketName); err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Make a new bucket with '.' in its name, in 'us-west-2'. This
- // request is internally staged into a path style instead of
- // virtual host style.
- if err = c.MakeBucket(bucketName+".withperiod", "us-west-2"); err != nil {
- t.Fatal("Error:", err, bucketName+".withperiod")
- }
-
- // Remove the newly created bucket.
- if err = c.RemoveBucket(bucketName + ".withperiod"); err != nil {
- t.Fatal("Error:", err, bucketName+".withperiod")
- }
-}
-
-// Tests get object ReaderSeeker interface methods.
-func TestGetObjectReadSeekFunctionalV2(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.NewV2(
- "s3.amazonaws.com",
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- true,
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Generate data more than 32K
- buf := make([]byte, rand.Intn(1<<20)+32*1024)
-
- _, err = io.ReadFull(crand.Reader, buf)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()))
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
-
- if n != int64(len(buf)) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
- }
-
- // Read the data back
- r, err := c.GetObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
-
- st, err := r.Stat()
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
- if st.Size != int64(len(buf)) {
- t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
- len(buf), st.Size)
- }
-
- offset := int64(2048)
- n, err = r.Seek(offset, 0)
- if err != nil {
- t.Fatal("Error:", err, offset)
- }
- if n != offset {
- t.Fatalf("Error: number of bytes seeked does not match, want %v, got %v\n",
- offset, n)
- }
- n, err = r.Seek(0, 1)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != offset {
- t.Fatalf("Error: number of current seek does not match, want %v, got %v\n",
- offset, n)
- }
- _, err = r.Seek(offset, 2)
- if err == nil {
- t.Fatal("Error: seek on positive offset for whence '2' should error out")
- }
- n, err = r.Seek(-offset, 2)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != 0 {
- t.Fatalf("Error: number of bytes seeked back does not match, want 0, got %v\n", n)
- }
-
- var buffer1 bytes.Buffer
- if n, err = io.CopyN(&buffer1, r, st.Size); err != nil {
- if err != io.EOF {
- t.Fatal("Error:", err)
- }
- }
- if !bytes.Equal(buf, buffer1.Bytes()) {
- t.Fatal("Error: Incorrect read bytes v/s original buffer.")
- }
-
- // Seek again and read again.
- n, err = r.Seek(offset-1, 0)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != (offset - 1) {
- t.Fatalf("Error: number of bytes seeked back does not match, want %v, got %v\n", offset-1, n)
- }
-
- var buffer2 bytes.Buffer
- if _, err = io.CopyN(&buffer2, r, st.Size); err != nil {
- if err != io.EOF {
- t.Fatal("Error:", err)
- }
- }
- // Verify now lesser bytes.
- if !bytes.Equal(buf[2047:], buffer2.Bytes()) {
- t.Fatal("Error: Incorrect read bytes v/s original buffer.")
- }
-
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-}
-
-// Tests get object ReaderAt interface methods.
-func TestGetObjectReadAtFunctionalV2(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for the short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.NewV2(
- "s3.amazonaws.com",
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- true,
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Generate data more than 32K
- buf := make([]byte, rand.Intn(1<<20)+32*1024)
-
- _, err = io.ReadFull(crand.Reader, buf)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()))
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
-
- if n != int64(len(buf)) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
- }
-
- // Read the data back
- r, err := c.GetObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
-
- st, err := r.Stat()
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
- if st.Size != int64(len(buf)) {
- t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
- len(buf), st.Size)
- }
-
- offset := int64(2048)
-
- // Read directly
- buf2 := make([]byte, 512)
- buf3 := make([]byte, 512)
- buf4 := make([]byte, 512)
-
- m, err := r.ReadAt(buf2, offset)
- if err != nil {
- t.Fatal("Error:", err, st.Size, len(buf2), offset)
- }
- if m != len(buf2) {
- t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf2))
- }
- if !bytes.Equal(buf2, buf[offset:offset+512]) {
- t.Fatal("Error: Incorrect read between two ReadAt from same offset.")
- }
- offset += 512
- m, err = r.ReadAt(buf3, offset)
- if err != nil {
- t.Fatal("Error:", err, st.Size, len(buf3), offset)
- }
- if m != len(buf3) {
- t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf3))
- }
- if !bytes.Equal(buf3, buf[offset:offset+512]) {
- t.Fatal("Error: Incorrect read between two ReadAt from same offset.")
- }
- offset += 512
- m, err = r.ReadAt(buf4, offset)
- if err != nil {
- t.Fatal("Error:", err, st.Size, len(buf4), offset)
- }
- if m != len(buf4) {
- t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf4))
- }
- if !bytes.Equal(buf4, buf[offset:offset+512]) {
- t.Fatal("Error: Incorrect read between two ReadAt from same offset.")
- }
-
- buf5 := make([]byte, n)
- // Read the whole object.
- m, err = r.ReadAt(buf5, 0)
- if err != nil {
- if err != io.EOF {
- t.Fatal("Error:", err, len(buf5))
- }
- }
- if m != len(buf5) {
- t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf5))
- }
- if !bytes.Equal(buf, buf5) {
- t.Fatal("Error: Incorrect data read in GetObject, than what was previously upoaded.")
- }
-
- buf6 := make([]byte, n+1)
- // Read the whole object and beyond.
- _, err = r.ReadAt(buf6, 0)
- if err != nil {
- if err != io.EOF {
- t.Fatal("Error:", err, len(buf6))
- }
- }
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-}
-
-// Tests copy object
-func TestCopyObjectV2(t *testing.T) {
- if testing.Short() {
- t.Skip("Skipping functional tests for short runs")
- }
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object
- c, err := minio.NewV2(
- "s3.amazonaws.com",
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- true,
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
-
- // Make a new bucket in 'us-east-1' (source bucket).
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Make a new bucket in 'us-east-1' (destination bucket).
- err = c.MakeBucket(bucketName+"-copy", "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName+"-copy")
- }
-
- // Generate data more than 32K
- buf := make([]byte, rand.Intn(1<<20)+32*1024)
-
- _, err = io.ReadFull(crand.Reader, buf)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()))
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
-
- if n != int64(len(buf)) {
- t.Fatalf("Error: number of bytes does not match want %v, got %v",
- len(buf), n)
- }
-
- // Set copy conditions.
- copyConds := minio.NewCopyConditions()
- err = copyConds.SetModified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Copy source.
- copySource := bucketName + "/" + objectName
-
- // Perform the Copy
- err = c.CopyObject(bucketName+"-copy", objectName+"-copy", copySource, copyConds)
- if err != nil {
- t.Fatal("Error:", err, bucketName+"-copy", objectName+"-copy")
- }
-
- // Source object
- reader, err := c.GetObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error:", err)
- }
- // Destination object
- readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy")
- if err != nil {
- t.Fatal("Error:", err)
- }
- // Check the various fields of source object against destination object.
- objInfo, err := reader.Stat()
- if err != nil {
- t.Fatal("Error:", err)
- }
- objInfoCopy, err := readerCopy.Stat()
- if err != nil {
- t.Fatal("Error:", err)
- }
- if objInfo.Size != objInfoCopy.Size {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n",
- objInfo.Size, objInfoCopy.Size)
- }
-
- // Remove all objects and buckets
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- err = c.RemoveObject(bucketName+"-copy", objectName+"-copy")
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- err = c.RemoveBucket(bucketName + "-copy")
- if err != nil {
- t.Fatal("Error:", err)
- }
-}
-
-// Tests comprehensive list of all methods.
-func TestFunctionalV2(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for the short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- c, err := minio.NewV2(
- "s3.amazonaws.com",
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- true,
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable to debug
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Generate a random file name.
- fileName := randString(60, rand.NewSource(time.Now().UnixNano()))
- file, err := os.Create(fileName)
- if err != nil {
- t.Fatal("Error:", err)
- }
- for i := 0; i < 3; i++ {
- buf := make([]byte, rand.Intn(1<<19))
- _, err = file.Write(buf)
- if err != nil {
- t.Fatal("Error:", err)
- }
- }
- file.Close()
-
-	// Verify if bucket exists and you have access.
- err = c.BucketExists(bucketName)
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Make the bucket 'public read/write'.
- err = c.SetBucketPolicy(bucketName, "", minio.BucketPolicyReadWrite)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // List all buckets.
- buckets, err := c.ListBuckets()
- if len(buckets) == 0 {
- t.Fatal("Error: list buckets cannot be empty", buckets)
- }
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Verify if previously created bucket is listed in list buckets.
- bucketFound := false
- for _, bucket := range buckets {
- if bucket.Name == bucketName {
- bucketFound = true
- }
- }
-
- // If bucket not found error out.
- if !bucketFound {
- t.Fatal("Error: bucket ", bucketName, "not found")
- }
-
- objectName := bucketName + "unique"
-
- // Generate data
- buf := make([]byte, rand.Intn(1<<19))
- _, err = io.ReadFull(crand.Reader, buf)
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "")
- if err != nil {
- t.Fatal("Error: ", err)
- }
- if n != int64(len(buf)) {
- t.Fatal("Error: bad length ", n, len(buf))
- }
-
- n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), "binary/octet-stream")
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName+"-nolength")
- }
-
- if n != int64(len(buf)) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
- }
-
- // Instantiate a done channel to close all listing.
- doneCh := make(chan struct{})
- defer close(doneCh)
-
- objFound := false
- isRecursive := true // Recursive is true.
- for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) {
- if obj.Key == objectName {
- objFound = true
- break
- }
- }
- if !objFound {
- t.Fatal("Error: object " + objectName + " not found.")
- }
-
- objFound = false
- isRecursive = true // Recursive is true.
- for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) {
- if obj.Key == objectName {
- objFound = true
- break
- }
- }
- if !objFound {
- t.Fatal("Error: object " + objectName + " not found.")
- }
-
- incompObjNotFound := true
- for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) {
- if objIncompl.Key != "" {
- incompObjNotFound = false
- break
- }
- }
- if !incompObjNotFound {
- t.Fatal("Error: unexpected dangling incomplete upload found.")
- }
-
- newReader, err := c.GetObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- newReadBytes, err := ioutil.ReadAll(newReader)
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- if !bytes.Equal(newReadBytes, buf) {
- t.Fatal("Error: bytes mismatch.")
- }
-
- err = c.FGetObject(bucketName, objectName, fileName+"-f")
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- // Generate presigned GET object url.
- presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- // Verify if presigned url works.
- resp, err := http.Get(presignedGetURL.String())
- if err != nil {
- t.Fatal("Error: ", err)
- }
- if resp.StatusCode != http.StatusOK {
- t.Fatal("Error: ", resp.Status)
- }
- newPresignedBytes, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- if !bytes.Equal(newPresignedBytes, buf) {
- t.Fatal("Error: bytes mismatch.")
- }
-
- // Set request parameters.
- reqParams := make(url.Values)
- reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
- // Generate presigned GET object url.
- presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- // Verify if presigned url works.
- resp, err = http.Get(presignedGetURL.String())
- if err != nil {
- t.Fatal("Error: ", err)
- }
- if resp.StatusCode != http.StatusOK {
- t.Fatal("Error: ", resp.Status)
- }
- newPresignedBytes, err = ioutil.ReadAll(resp.Body)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- if !bytes.Equal(newPresignedBytes, buf) {
- t.Fatal("Error: bytes mismatch for presigned GET url.")
- }
- // Verify content disposition.
- if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" {
- t.Fatalf("Error: wrong Content-Disposition received %s", resp.Header.Get("Content-Disposition"))
- }
-
- presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- buf = make([]byte, rand.Intn(1<<20))
- _, err = io.ReadFull(crand.Reader, buf)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf))
- if err != nil {
- t.Fatal("Error: ", err)
- }
- httpClient := &http.Client{
-		// Set a sensible timeout of 30 seconds to wait for response
-		// headers; the request is proactively cancelled after 30 seconds
-		// with no response.
- Timeout: 30 * time.Second,
- Transport: http.DefaultTransport,
- }
- resp, err = httpClient.Do(req)
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- newReader, err = c.GetObject(bucketName, objectName+"-presigned")
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- newReadBytes, err = ioutil.ReadAll(newReader)
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- if !bytes.Equal(newReadBytes, buf) {
- t.Fatal("Error: bytes mismatch.")
- }
-
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- err = c.RemoveObject(bucketName, objectName+"-f")
- if err != nil {
- t.Fatal("Error: ", err)
- }
- err = c.RemoveObject(bucketName, objectName+"-nolength")
- if err != nil {
- t.Fatal("Error: ", err)
- }
- err = c.RemoveObject(bucketName, objectName+"-presigned")
- if err != nil {
- t.Fatal("Error: ", err)
- }
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
- err = c.RemoveBucket(bucketName)
- if err == nil {
- t.Fatal("Error:")
- }
- if err.Error() != "The specified bucket does not exist" {
- t.Fatal("Error: ", err)
- }
- if err = os.Remove(fileName); err != nil {
- t.Fatal("Error: ", err)
- }
- if err = os.Remove(fileName + "-f"); err != nil {
- t.Fatal("Error: ", err)
- }
-}
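
The v2 functional tests above all follow the same shape: skip under 'go test -short', build a client from ACCESS_KEY/SECRET_KEY in the environment, then exercise the API against a live endpoint. As a standalone illustration of the presigned-GET flow they cover, here is a minimal sketch; the credentials, bucket and object names are placeholders, not values from this change:

	package main

	import (
		"io/ioutil"
		"log"
		"net/http"
		"time"

		"github.com/minio/minio-go"
	)

	func main() {
		// Signature V2 client; credentials and names are illustrative only.
		c, err := minio.NewV2("s3.amazonaws.com", "YOUR-ACCESSKEY", "YOUR-SECRETKEY", true)
		if err != nil {
			log.Fatalln(err)
		}
		// Generate a GET URL valid for one hour, as the test above does.
		u, err := c.PresignedGetObject("my-bucketname", "my-objectname", time.Hour, nil)
		if err != nil {
			log.Fatalln(err)
		}
		// Fetch it with a plain HTTP client; no SDK needed on the consumer side.
		resp, err := http.Get(u.String())
		if err != nil {
			log.Fatalln(err)
		}
		defer resp.Body.Close()
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			log.Fatalln(err)
		}
		log.Printf("fetched %d bytes via presigned URL", len(body))
	}
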
diff --git a/api_functional_v4_test.go b/api_functional_v4_test.go
deleted file mode 100644
index 25fc50d..0000000
--- a/api_functional_v4_test.go
+++ /dev/null
@@ -1,1380 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio_test
-
-import (
- "bytes"
- crand "crypto/rand"
- "errors"
- "io"
- "io/ioutil"
- "math/rand"
- "net/http"
- "net/url"
- "os"
- "testing"
- "time"
-
- "github.com/minio/minio-go"
-)
-
-const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"
-const (
- letterIdxBits = 6 // 6 bits to represent a letter index
- letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
- letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits
-)
-
-func randString(n int, src rand.Source) string {
- b := make([]byte, n)
- // A rand.Int63() generates 63 random bits, enough for letterIdxMax letters!
- for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
- if remain == 0 {
- cache, remain = src.Int63(), letterIdxMax
- }
- if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
- b[i] = letterBytes[idx]
- i--
- }
- cache >>= letterIdxBits
- remain--
- }
- return string(b[0:30])
-}
-
-// Tests bucket re-create errors.
-func TestMakeBucketError(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.New(
- "s3.amazonaws.com",
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- true,
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
-
- // Make a new bucket in 'eu-central-1'.
- if err = c.MakeBucket(bucketName, "eu-central-1"); err != nil {
- t.Fatal("Error:", err, bucketName)
- }
- if err = c.MakeBucket(bucketName, "eu-central-1"); err == nil {
- t.Fatal("Error: make bucket should should fail for", bucketName)
- }
- // Verify valid error response from server.
- if minio.ToErrorResponse(err).Code != "BucketAlreadyExists" &&
- minio.ToErrorResponse(err).Code != "BucketAlreadyOwnedByYou" {
- t.Fatal("Error: Invalid error returned by server", err)
- }
- if err = c.RemoveBucket(bucketName); err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-}
-
-// Tests various bucket supported formats.
-func TestMakeBucketRegions(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.New(
- "s3.amazonaws.com",
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- true,
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
-
- // Make a new bucket in 'eu-central-1'.
- if err = c.MakeBucket(bucketName, "eu-central-1"); err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- if err = c.RemoveBucket(bucketName); err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Make a new bucket with '.' in its name, in 'us-west-2'. This
- // request is internally staged into a path style instead of
- // virtual host style.
- if err = c.MakeBucket(bucketName+".withperiod", "us-west-2"); err != nil {
- t.Fatal("Error:", err, bucketName+".withperiod")
- }
-
- // Remove the newly created bucket.
- if err = c.RemoveBucket(bucketName + ".withperiod"); err != nil {
- t.Fatal("Error:", err, bucketName+".withperiod")
- }
-}
-
-// Test get object reader to not throw error on being closed twice.
-func TestGetObjectClosedTwice(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.New(
- "s3.amazonaws.com",
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- true,
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Generate data more than 32K
- buf := make([]byte, rand.Intn(1<<20)+32*1024)
-
- _, err = io.ReadFull(crand.Reader, buf)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()))
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
-
- if n != int64(len(buf)) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
- }
-
- // Read the data back
- r, err := c.GetObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
-
- st, err := r.Stat()
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
- if st.Size != int64(len(buf)) {
- t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
- len(buf), st.Size)
- }
- if err := r.Close(); err != nil {
- t.Fatal("Error:", err)
- }
- if err := r.Close(); err == nil {
- t.Fatal("Error: object is already closed, should return error")
- }
-
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-}
-
-// Tests removing partially uploaded objects.
-func TestRemovePartiallyUploaded(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping function tests for short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.New(
- "s3.amazonaws.com",
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- true,
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- reader, writer := io.Pipe()
- go func() {
- i := 0
- for i < 25 {
- _, err = io.CopyN(writer, crand.Reader, 128*1024)
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
- i++
- }
- err := writer.CloseWithError(errors.New("Proactively closed to be verified later."))
- if err != nil {
- t.Fatal("Error:", err)
- }
- }()
-
- objectName := bucketName + "-resumable"
- _, err = c.PutObject(bucketName, objectName, reader, "application/octet-stream")
- if err == nil {
- t.Fatal("Error: PutObject should fail.")
- }
- if err.Error() != "Proactively closed to be verified later." {
- t.Fatal("Error:", err)
- }
- err = c.RemoveIncompleteUpload(bucketName, objectName)
- if err != nil {
- t.Fatal("Error:", err)
- }
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-}
-
-// Tests resumable put object cloud to cloud.
-func TestResumablePutObject(t *testing.T) {
-	// Passing 'go test -short' skips these tests.
- if testing.Short() {
- t.Skip("skipping functional tests for the short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.New(
- "s3.amazonaws.com",
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- true,
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Create a temporary file.
- file, err := ioutil.TempFile(os.TempDir(), "resumable")
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Copy 11MiB worth of random data.
- n, err := io.CopyN(file, crand.Reader, 11*1024*1024)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != int64(11*1024*1024) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
- }
-
-	// Close the file proactively for Windows.
- if err = file.Close(); err != nil {
- t.Fatal("Error:", err)
- }
-
- // New object name.
- objectName := bucketName + "-resumable"
-
- // Upload the file.
- n, err = c.FPutObject(bucketName, objectName, file.Name(), "application/octet-stream")
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != int64(11*1024*1024) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
- }
-
- // Get the uploaded object.
- reader, err := c.GetObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Upload now cloud to cloud.
- n, err = c.PutObject(bucketName, objectName+"-put", reader, "application/octest-stream")
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Get object info.
- objInfo, err := reader.Stat()
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != objInfo.Size {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", objInfo.Size, n)
- }
-
- // Remove all temp files, objects and bucket.
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- err = c.RemoveObject(bucketName, objectName+"-put")
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- err = os.Remove(file.Name())
- if err != nil {
- t.Fatal("Error:", err)
- }
-}
-
-// Tests resumable file based put object multipart upload.
-func TestResumableFPutObject(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for the short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.New(
- "s3.amazonaws.com",
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- true,
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
-	// Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- file, err := ioutil.TempFile(os.TempDir(), "resumable")
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- n, err := io.CopyN(file, crand.Reader, 11*1024*1024)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != int64(11*1024*1024) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
- }
-
-	// Close the file proactively for Windows.
- err = file.Close()
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- objectName := bucketName + "-resumable"
-
- n, err = c.FPutObject(bucketName, objectName, file.Name(), "application/octet-stream")
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != int64(11*1024*1024) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
- }
-
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- err = os.Remove(file.Name())
- if err != nil {
- t.Fatal("Error:", err)
- }
-}
-
-// Tests FPutObject's implicit contentType setting.
-func TestFPutObject(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.New(
- "s3.amazonaws.com",
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- true,
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Make a temp file with 11*1024*1024 bytes of data.
- file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- n, err := io.CopyN(file, crand.Reader, 11*1024*1024)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != int64(11*1024*1024) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
- }
-
-	// Close the file proactively for Windows.
- err = file.Close()
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Set base object name
- objectName := bucketName + "FPutObject"
-
- // Perform standard FPutObject with contentType provided (Expecting application/octet-stream)
- n, err = c.FPutObject(bucketName, objectName+"-standard", file.Name(), "application/octet-stream")
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != int64(11*1024*1024) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
- }
-
- // Perform FPutObject with no contentType provided (Expecting application/octet-stream)
- n, err = c.FPutObject(bucketName, objectName+"-Octet", file.Name(), "")
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != int64(11*1024*1024) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
- }
-
- // Add extension to temp file name
- fileName := file.Name()
- err = os.Rename(file.Name(), fileName+".gtar")
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Perform FPutObject with no contentType provided (Expecting application/x-gtar)
- n, err = c.FPutObject(bucketName, objectName+"-GTar", fileName+".gtar", "")
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != int64(11*1024*1024) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", 11*1024*1024, n)
- }
-
- // Check headers
- rStandard, err := c.StatObject(bucketName, objectName+"-standard")
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName+"-standard")
- }
- if rStandard.ContentType != "application/octet-stream" {
- t.Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
- "application/octet-stream", rStandard.ContentType)
- }
-
- rOctet, err := c.StatObject(bucketName, objectName+"-Octet")
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName+"-Octet")
- }
- if rOctet.ContentType != "application/octet-stream" {
- t.Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
- "application/octet-stream", rStandard.ContentType)
- }
-
- rGTar, err := c.StatObject(bucketName, objectName+"-GTar")
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName+"-GTar")
- }
- if rGTar.ContentType != "application/x-gtar" {
- t.Fatalf("Error: Content-Type headers mismatched, want %v, got %v\n",
- "application/x-gtar", rStandard.ContentType)
- }
-
- // Remove all objects and bucket and temp file
- err = c.RemoveObject(bucketName, objectName+"-standard")
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- err = c.RemoveObject(bucketName, objectName+"-Octet")
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- err = c.RemoveObject(bucketName, objectName+"-GTar")
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- err = os.Remove(fileName + ".gtar")
- if err != nil {
- t.Fatal("Error:", err)
- }
-
-}
-
-// Tests get object ReaderSeeker interface methods.
-func TestGetObjectReadSeekFunctional(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.New(
- "s3.amazonaws.com",
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- true,
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Generate data more than 32K
- buf := make([]byte, rand.Intn(1<<20)+32*1024)
-
- _, err = io.ReadFull(crand.Reader, buf)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()))
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
-
- if n != int64(len(buf)) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
- }
-
- // Read the data back
- r, err := c.GetObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
-
- st, err := r.Stat()
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
- if st.Size != int64(len(buf)) {
- t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
- len(buf), st.Size)
- }
-
- offset := int64(2048)
- n, err = r.Seek(offset, 0)
- if err != nil {
- t.Fatal("Error:", err, offset)
- }
- if n != offset {
- t.Fatalf("Error: number of bytes seeked does not match, want %v, got %v\n",
- offset, n)
- }
- n, err = r.Seek(0, 1)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != offset {
- t.Fatalf("Error: number of current seek does not match, want %v, got %v\n",
- offset, n)
- }
- _, err = r.Seek(offset, 2)
- if err == nil {
- t.Fatal("Error: seek on positive offset for whence '2' should error out")
- }
- n, err = r.Seek(-offset, 2)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != 0 {
- t.Fatalf("Error: number of bytes seeked back does not match, want 0, got %v\n", n)
- }
-
- var buffer1 bytes.Buffer
- if n, err = io.CopyN(&buffer1, r, st.Size); err != nil {
- if err != io.EOF {
- t.Fatal("Error:", err)
- }
- }
- if !bytes.Equal(buf, buffer1.Bytes()) {
- t.Fatal("Error: Incorrect read bytes v/s original buffer.")
- }
-
- // Seek again and read again.
- n, err = r.Seek(offset-1, 0)
- if err != nil {
- t.Fatal("Error:", err)
- }
- if n != (offset - 1) {
- t.Fatalf("Error: number of bytes seeked back does not match, want %v, got %v\n", offset-1, n)
- }
-
- var buffer2 bytes.Buffer
- if _, err = io.CopyN(&buffer2, r, st.Size); err != nil {
- if err != io.EOF {
- t.Fatal("Error:", err)
- }
- }
- // Verify now lesser bytes.
- if !bytes.Equal(buf[2047:], buffer2.Bytes()) {
- t.Fatal("Error: Incorrect read bytes v/s original buffer.")
- }
-
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-}
-
-// Tests get object ReaderAt interface methods.
-func TestGetObjectReadAtFunctional(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for the short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object.
- c, err := minio.New(
- "s3.amazonaws.com",
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- true,
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Generate data more than 32K
- buf := make([]byte, rand.Intn(1<<20)+32*1024)
-
- _, err = io.ReadFull(crand.Reader, buf)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()))
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
-
- if n != int64(len(buf)) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
- }
-
- // read the data back
- r, err := c.GetObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
-
- st, err := r.Stat()
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
- if st.Size != int64(len(buf)) {
- t.Fatalf("Error: number of bytes in stat does not match, want %v, got %v\n",
- len(buf), st.Size)
- }
-
- offset := int64(2048)
-
- // read directly
- buf2 := make([]byte, 512)
- buf3 := make([]byte, 512)
- buf4 := make([]byte, 512)
-
- m, err := r.ReadAt(buf2, offset)
- if err != nil {
- t.Fatal("Error:", err, st.Size, len(buf2), offset)
- }
- if m != len(buf2) {
- t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf2))
- }
- if !bytes.Equal(buf2, buf[offset:offset+512]) {
- t.Fatal("Error: Incorrect read between two ReadAt from same offset.")
- }
- offset += 512
- m, err = r.ReadAt(buf3, offset)
- if err != nil {
- t.Fatal("Error:", err, st.Size, len(buf3), offset)
- }
- if m != len(buf3) {
- t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf3))
- }
- if !bytes.Equal(buf3, buf[offset:offset+512]) {
- t.Fatal("Error: Incorrect read between two ReadAt from same offset.")
- }
- offset += 512
- m, err = r.ReadAt(buf4, offset)
- if err != nil {
- t.Fatal("Error:", err, st.Size, len(buf4), offset)
- }
- if m != len(buf4) {
- t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf4))
- }
- if !bytes.Equal(buf4, buf[offset:offset+512]) {
- t.Fatal("Error: Incorrect read between two ReadAt from same offset.")
- }
-
- buf5 := make([]byte, n)
- // Read the whole object.
- m, err = r.ReadAt(buf5, 0)
- if err != nil {
- if err != io.EOF {
- t.Fatal("Error:", err, len(buf5))
- }
- }
- if m != len(buf5) {
- t.Fatalf("Error: ReadAt read shorter bytes before reaching EOF, want %v, got %v\n", m, len(buf5))
- }
- if !bytes.Equal(buf, buf5) {
- t.Fatal("Error: Incorrect data read in GetObject, than what was previously upoaded.")
- }
-
- buf6 := make([]byte, n+1)
- // Read the whole object and beyond.
- _, err = r.ReadAt(buf6, 0)
- if err != nil {
- if err != io.EOF {
- t.Fatal("Error:", err, len(buf6))
- }
- }
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-}
-
-// Tests copy object
-func TestCopyObject(t *testing.T) {
- if testing.Short() {
- t.Skip("Skipping functional tests for short runs")
- }
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- // Instantiate new minio client object
- c, err := minio.NewV4(
- "s3.amazonaws.com",
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- true,
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable tracing, write to stderr.
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
-
- // Make a new bucket in 'us-east-1' (source bucket).
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Make a new bucket in 'us-east-1' (destination bucket).
- err = c.MakeBucket(bucketName+"-copy", "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName+"-copy")
- }
-
- // Generate data more than 32K
- buf := make([]byte, rand.Intn(1<<20)+32*1024)
-
- _, err = io.ReadFull(crand.Reader, buf)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Save the data
- objectName := randString(60, rand.NewSource(time.Now().UnixNano()))
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName)
- }
-
- if n != int64(len(buf)) {
- t.Fatalf("Error: number of bytes does not match want %v, got %v",
- len(buf), n)
- }
-
- // Set copy conditions.
- copyConds := minio.NewCopyConditions()
- err = copyConds.SetModified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Copy source.
- copySource := bucketName + "/" + objectName
-
- // Perform the Copy
- err = c.CopyObject(bucketName+"-copy", objectName+"-copy", copySource, copyConds)
- if err != nil {
- t.Fatal("Error:", err, bucketName+"-copy", objectName+"-copy")
- }
-
- // Source object
- reader, err := c.GetObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error:", err)
- }
- // Destination object
- readerCopy, err := c.GetObject(bucketName+"-copy", objectName+"-copy")
- if err != nil {
- t.Fatal("Error:", err)
- }
- // Check the various fields of source object against destination object.
- objInfo, err := reader.Stat()
- if err != nil {
- t.Fatal("Error:", err)
- }
- objInfoCopy, err := readerCopy.Stat()
- if err != nil {
- t.Fatal("Error:", err)
- }
- if objInfo.Size != objInfoCopy.Size {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n",
- objInfo.Size, objInfoCopy.Size)
- }
-
- // Remove all objects and buckets
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- err = c.RemoveObject(bucketName+"-copy", objectName+"-copy")
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- err = c.RemoveBucket(bucketName + "-copy")
- if err != nil {
- t.Fatal("Error:", err)
- }
-}
-
-// Tests a comprehensive list of all methods.
-func TestFunctional(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping functional tests for the short runs")
- }
-
- // Seed random based on current time.
- rand.Seed(time.Now().Unix())
-
- c, err := minio.New(
- "s3.amazonaws.com",
- os.Getenv("ACCESS_KEY"),
- os.Getenv("SECRET_KEY"),
- true,
- )
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Enable to debug
- // c.TraceOn(os.Stderr)
-
- // Set user agent.
- c.SetAppInfo("Minio-go-FunctionalTest", "0.1.0")
-
- // Generate a new random bucket name.
- bucketName := randString(60, rand.NewSource(time.Now().UnixNano()))
-
- // Make a new bucket.
- err = c.MakeBucket(bucketName, "us-east-1")
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Generate a random file name.
- fileName := randString(60, rand.NewSource(time.Now().UnixNano()))
- file, err := os.Create(fileName)
- if err != nil {
- t.Fatal("Error:", err)
- }
- for i := 0; i < 3; i++ {
- buf := make([]byte, rand.Intn(1<<19))
- _, err = file.Write(buf)
- if err != nil {
- t.Fatal("Error:", err)
- }
- }
- file.Close()
-
-	// Verify if bucket exists and you have access.
- err = c.BucketExists(bucketName)
- if err != nil {
- t.Fatal("Error:", err, bucketName)
- }
-
- // Asserting the default bucket policy.
- policy, err := c.GetBucketPolicy(bucketName, "")
- if err != nil {
- t.Fatal("Error:", err)
- }
- if policy != "none" {
- t.Fatalf("Default bucket policy incorrect")
- }
- // Set the bucket policy to 'public readonly'.
- err = c.SetBucketPolicy(bucketName, "", minio.BucketPolicyReadOnly)
- if err != nil {
- t.Fatal("Error:", err)
- }
- // should return policy `readonly`.
- policy, err = c.GetBucketPolicy(bucketName, "")
- if err != nil {
- t.Fatal("Error:", err)
- }
- if policy != "readonly" {
- t.Fatalf("Expected bucket policy to be readonly")
- }
-
- // Make the bucket 'public writeonly'.
- err = c.SetBucketPolicy(bucketName, "", minio.BucketPolicyWriteOnly)
- if err != nil {
- t.Fatal("Error:", err)
- }
- // should return policy `writeonly`.
- policy, err = c.GetBucketPolicy(bucketName, "")
- if err != nil {
- t.Fatal("Error:", err)
- }
- if policy != "writeonly" {
- t.Fatalf("Expected bucket policy to be writeonly")
- }
- // Make the bucket 'public read/write'.
- err = c.SetBucketPolicy(bucketName, "", minio.BucketPolicyReadWrite)
- if err != nil {
- t.Fatal("Error:", err)
- }
- // should return policy `readwrite`.
- policy, err = c.GetBucketPolicy(bucketName, "")
- if err != nil {
- t.Fatal("Error:", err)
- }
- if policy != "readwrite" {
- t.Fatalf("Expected bucket policy to be readwrite")
- }
- // List all buckets.
- buckets, err := c.ListBuckets()
- if len(buckets) == 0 {
- t.Fatal("Error: list buckets cannot be empty", buckets)
- }
- if err != nil {
- t.Fatal("Error:", err)
- }
-
- // Verify if previously created bucket is listed in list buckets.
- bucketFound := false
- for _, bucket := range buckets {
- if bucket.Name == bucketName {
- bucketFound = true
- }
- }
-
- // If bucket not found error out.
- if !bucketFound {
- t.Fatal("Error: bucket ", bucketName, "not found")
- }
-
- objectName := bucketName + "unique"
-
- // Generate data
- buf := make([]byte, rand.Intn(1<<19))
- _, err = io.ReadFull(crand.Reader, buf)
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "")
- if err != nil {
- t.Fatal("Error: ", err)
- }
- if n != int64(len(buf)) {
- t.Fatal("Error: bad length ", n, len(buf))
- }
-
- n, err = c.PutObject(bucketName, objectName+"-nolength", bytes.NewReader(buf), "binary/octet-stream")
- if err != nil {
- t.Fatal("Error:", err, bucketName, objectName+"-nolength")
- }
-
- if n != int64(len(buf)) {
- t.Fatalf("Error: number of bytes does not match, want %v, got %v\n", len(buf), n)
- }
-
- // Instantiate a done channel to close all listing.
- doneCh := make(chan struct{})
- defer close(doneCh)
-
- objFound := false
- isRecursive := true // Recursive is true.
- for obj := range c.ListObjects(bucketName, objectName, isRecursive, doneCh) {
- if obj.Key == objectName {
- objFound = true
- break
- }
- }
- if !objFound {
- t.Fatal("Error: object " + objectName + " not found.")
- }
-
- objFound = false
- isRecursive = true // Recursive is true.
- for obj := range c.ListObjectsV2(bucketName, objectName, isRecursive, doneCh) {
- if obj.Key == objectName {
- objFound = true
- break
- }
- }
- if !objFound {
- t.Fatal("Error: object " + objectName + " not found.")
- }
-
- incompObjNotFound := true
- for objIncompl := range c.ListIncompleteUploads(bucketName, objectName, isRecursive, doneCh) {
- if objIncompl.Key != "" {
- incompObjNotFound = false
- break
- }
- }
- if !incompObjNotFound {
- t.Fatal("Error: unexpected dangling incomplete upload found.")
- }
-
- newReader, err := c.GetObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- newReadBytes, err := ioutil.ReadAll(newReader)
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- if !bytes.Equal(newReadBytes, buf) {
- t.Fatal("Error: bytes mismatch.")
- }
-
- err = c.FGetObject(bucketName, objectName, fileName+"-f")
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- // Generate presigned GET object url.
- presignedGetURL, err := c.PresignedGetObject(bucketName, objectName, 3600*time.Second, nil)
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- // Verify if presigned url works.
- resp, err := http.Get(presignedGetURL.String())
- if err != nil {
- t.Fatal("Error: ", err)
- }
- if resp.StatusCode != http.StatusOK {
- t.Fatal("Error: ", resp.Status)
- }
- newPresignedBytes, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- if !bytes.Equal(newPresignedBytes, buf) {
- t.Fatal("Error: bytes mismatch.")
- }
-
- // Set request parameters.
- reqParams := make(url.Values)
- reqParams.Set("response-content-disposition", "attachment; filename=\"test.txt\"")
- presignedGetURL, err = c.PresignedGetObject(bucketName, objectName, 3600*time.Second, reqParams)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- // Verify if presigned url works.
- resp, err = http.Get(presignedGetURL.String())
- if err != nil {
- t.Fatal("Error: ", err)
- }
- if resp.StatusCode != http.StatusOK {
- t.Fatal("Error: ", resp.Status)
- }
- newPresignedBytes, err = ioutil.ReadAll(resp.Body)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- if !bytes.Equal(newPresignedBytes, buf) {
- t.Fatal("Error: bytes mismatch for presigned GET URL.")
- }
- if resp.Header.Get("Content-Disposition") != "attachment; filename=\"test.txt\"" {
- t.Fatalf("Error: wrong Content-Disposition received %s", resp.Header.Get("Content-Disposition"))
- }
-
- presignedPutURL, err := c.PresignedPutObject(bucketName, objectName+"-presigned", 3600*time.Second)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- buf = make([]byte, rand.Intn(1<<20))
- _, err = io.ReadFull(crand.Reader, buf)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf))
- if err != nil {
- t.Fatal("Error: ", err)
- }
- httpClient := &http.Client{
-		// Set a sensible timeout of 30 seconds to wait for response
-		// headers; the request is proactively cancelled after 30 seconds
-		// with no response.
- Timeout: 30 * time.Second,
- Transport: http.DefaultTransport,
- }
- resp, err = httpClient.Do(req)
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- newReader, err = c.GetObject(bucketName, objectName+"-presigned")
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- newReadBytes, err = ioutil.ReadAll(newReader)
- if err != nil {
- t.Fatal("Error: ", err)
- }
-
- if !bytes.Equal(newReadBytes, buf) {
- t.Fatal("Error: bytes mismatch.")
- }
-
- err = c.RemoveObject(bucketName, objectName)
- if err != nil {
- t.Fatal("Error: ", err)
- }
- err = c.RemoveObject(bucketName, objectName+"-f")
- if err != nil {
- t.Fatal("Error: ", err)
- }
- err = c.RemoveObject(bucketName, objectName+"-nolength")
- if err != nil {
- t.Fatal("Error: ", err)
- }
- err = c.RemoveObject(bucketName, objectName+"-presigned")
- if err != nil {
- t.Fatal("Error: ", err)
- }
- err = c.RemoveBucket(bucketName)
- if err != nil {
- t.Fatal("Error:", err)
- }
- err = c.RemoveBucket(bucketName)
- if err == nil {
- t.Fatal("Error:")
- }
- if err.Error() != "The specified bucket does not exist" {
- t.Fatal("Error: ", err)
- }
- if err = os.Remove(fileName); err != nil {
- t.Fatal("Error: ", err)
- }
- if err = os.Remove(fileName + "-f"); err != nil {
- t.Fatal("Error: ", err)
- }
-}
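
Outside the test harness, the conditional server-side copy exercised by TestCopyObject above reduces to a handful of calls, all defined in this library. A minimal sketch; the endpoint is real but the credentials, bucket and object names are placeholders:

	package main

	import (
		"log"
		"time"

		"github.com/minio/minio-go"
	)

	func main() {
		c, err := minio.NewV4("s3.amazonaws.com", "YOUR-ACCESSKEY", "YOUR-SECRETKEY", true)
		if err != nil {
			log.Fatalln(err)
		}
		// Only copy when the source was modified after the given date.
		conds := minio.NewCopyConditions()
		if err := conds.SetModified(time.Date(2014, time.April, 1, 0, 0, 0, 0, time.UTC)); err != nil {
			log.Fatalln(err)
		}
		// Destination bucket and object come first; the source is "bucket/object".
		if err := c.CopyObject("my-dst-bucket", "my-dst-object", "my-src-bucket/my-src-object", conds); err != nil {
			log.Fatalln(err)
		}
	}
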
diff --git a/api_unit_test.go b/api_unit_test.go
index 4fb1978..817a8c2 100644
--- a/api_unit_test.go
+++ b/api_unit_test.go
@@ -26,6 +26,8 @@ import (
"os"
"strings"
"testing"
+
+ "github.com/minio/minio-go/pkg/policy"
)
type customReader struct{}
@@ -76,22 +78,41 @@ func TestGetReaderSize(t *testing.T) {
}
// Create request channel.
- reqCh := make(chan readRequest)
+ reqCh := make(chan getRequest, 1)
// Create response channel.
- resCh := make(chan readResponse)
+ resCh := make(chan getResponse, 1)
// Create done channel.
doneCh := make(chan struct{})
- // objectInfo.
+
objectInfo := ObjectInfo{Size: 10}
- objectReader := newObject(reqCh, resCh, doneCh, objectInfo)
- defer objectReader.Close()
+ // Create the first request.
+ firstReq := getRequest{
+ isReadOp: false, // Perform only a HEAD object to get objectInfo.
+ isFirstReq: true,
+ }
+ // Create the expected response.
+ firstRes := getResponse{
+ objectInfo: objectInfo,
+ }
+ // Send the expected response.
+ resCh <- firstRes
+
+ // Test setting size on the first request.
+ objectReaderFirstReq := newObject(reqCh, resCh, doneCh)
+ defer objectReaderFirstReq.Close()
+ // Not checking the response here...just that the reader size is correct.
+ _, err = objectReaderFirstReq.doGetRequest(firstReq)
+ if err != nil {
+ t.Fatal("Error:", err)
+ }
- size, err = getReaderSize(objectReader)
+ // Validate that the reader size is the objectInfo size.
+ size, err = getReaderSize(objectReaderFirstReq)
if err != nil {
t.Fatal("Error:", err)
}
if size != int64(10) {
- t.Fatalf("Reader length doesn't match got: %v, want: %v", size, 10)
+ t.Fatalf("Reader length doesn't match got: %d, wanted %d", size, objectInfo.Size)
}
fileReader, err := ioutil.TempFile(os.TempDir(), "prefix")
@@ -325,7 +346,7 @@ func TestBucketPolicyTypes(t *testing.T) {
"invalid": false,
}
for bucketPolicy, ok := range want {
- if BucketPolicy(bucketPolicy).isValidBucketPolicy() != ok {
+ if policy.BucketPolicy(bucketPolicy).IsValidBucketPolicy() != ok {
t.Fatal("Error")
}
}
@@ -333,24 +354,24 @@ func TestBucketPolicyTypes(t *testing.T) {
// Tests optimal part size.
func TestPartSize(t *testing.T) {
- totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(5000000000000000000)
+ _, _, _, err := optimalPartInfo(5000000000000000000)
if err == nil {
t.Fatal("Error: should fail")
}
- totalPartsCount, partSize, lastPartSize, err = optimalPartInfo(5497558138880)
+ totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(5497558138880)
if err != nil {
t.Fatal("Error: ", err)
}
- if totalPartsCount != 9987 {
+ if totalPartsCount != 9103 {
t.Fatalf("Error: expecting total parts count of 9987: got %v instead", totalPartsCount)
}
- if partSize != 550502400 {
+ if partSize != 603979776 {
t.Fatalf("Error: expecting part size of 550502400: got %v instead", partSize)
}
- if lastPartSize != 241172480 {
+ if lastPartSize != 134217728 {
t.Fatalf("Error: expecting last part size of 241172480: got %v instead", lastPartSize)
}
- totalPartsCount, partSize, lastPartSize, err = optimalPartInfo(5000000000)
+ _, partSize, _, err = optimalPartInfo(5000000000)
if err != nil {
t.Fatal("Error:", err)
}
@@ -361,13 +382,13 @@ func TestPartSize(t *testing.T) {
if err != nil {
t.Fatal("Error:", err)
}
- if totalPartsCount != 9987 {
+ if totalPartsCount != 9103 {
t.Fatalf("Error: expecting total parts count of 9987: got %v instead", totalPartsCount)
}
- if partSize != 550502400 {
+ if partSize != 603979776 {
t.Fatalf("Error: expecting part size of 550502400: got %v instead", partSize)
}
- if lastPartSize != 241172480 {
+ if lastPartSize != 134217728 {
t.Fatalf("Error: expecting last part size of 241172480: got %v instead", lastPartSize)
}
}
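
For reference, the updated expectations are internally consistent for the 5497558138880-byte (5 TiB) input used by TestPartSize: 9102 full parts of 603979776 bytes (576 MiB) plus a final part of 134217728 bytes (128 MiB) cover the input exactly, giving 9103 parts in total. A self-contained check of that arithmetic:

	package main

	import "fmt"

	func main() {
		const (
			total    = int64(5497558138880) // 5 TiB input used by TestPartSize.
			partSize = int64(603979776)     // expected part size (576 MiB).
			lastPart = int64(134217728)     // expected last part size (128 MiB).
			parts    = int64(9103)          // expected total parts count.
		)
		// 9102 full parts plus the last part must cover the input exactly.
		fmt.Println((parts-1)*partSize+lastPart == total) // prints: true
	}
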
diff --git a/bucket-cache.go b/bucket-cache.go
index ca3dabf..4ad1069 100644
--- a/bucket-cache.go
+++ b/bucket-cache.go
@@ -25,7 +25,7 @@ import (
"sync"
)
-// bucketLocationCache - Provides simple mechansim to hold bucket
+// bucketLocationCache - Provides simple mechanism to hold bucket
// locations in memory.
type bucketLocationCache struct {
// mutex is used for handling the concurrent
@@ -66,8 +66,21 @@ func (r *bucketLocationCache) Delete(bucketName string) {
delete(r.items, bucketName)
}
-// getBucketLocation - Get location for the bucketName from location map cache.
+// GetBucketLocation - get location for the bucket name from the location cache; if not
+// found, fetch it freshly by making a new request.
+func (c Client) GetBucketLocation(bucketName string) (string, error) {
+ if err := isValidBucketName(bucketName); err != nil {
+ return "", err
+ }
+ return c.getBucketLocation(bucketName)
+}
+
+// getBucketLocation - Get location for the bucketName from the location map cache; if not
+// found, fetch it freshly by making a new request.
func (c Client) getBucketLocation(bucketName string) (string, error) {
+ if err := isValidBucketName(bucketName); err != nil {
+ return "", err
+ }
if location, ok := c.bucketLocCache.Get(bucketName); ok {
return location, nil
}
@@ -165,7 +178,13 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro
// Set sha256 sum for signature calculation only with signature version '4'.
if c.signature.isV4() {
- req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{})))
+ var contentSha256 string
+ if c.secure {
+ contentSha256 = unsignedPayload
+ } else {
+ contentSha256 = hex.EncodeToString(sum256([]byte{}))
+ }
+ req.Header.Set("X-Amz-Content-Sha256", contentSha256)
}
// Sign the request.
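
With GetBucketLocation now exported, callers can query a bucket's region directly, and the cache makes repeat lookups cheap. A minimal sketch; credentials and the bucket name are placeholders:

	package main

	import (
		"fmt"
		"log"

		"github.com/minio/minio-go"
	)

	func main() {
		c, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEY", "YOUR-SECRETKEY", true)
		if err != nil {
			log.Fatalln(err)
		}
		// The first call issues a GetBucketLocation request; later calls hit the cache.
		location, err := c.GetBucketLocation("my-bucketname")
		if err != nil {
			log.Fatalln(err)
		}
		fmt.Println("bucket region:", location)
	}
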
diff --git a/bucket-cache_test.go b/bucket-cache_test.go
index 76b4533..81cfbc0 100644
--- a/bucket-cache_test.go
+++ b/bucket-cache_test.go
@@ -316,7 +316,7 @@ func TestProcessBucketLocationResponse(t *testing.T) {
}
if err == nil && testCase.shouldPass {
if !reflect.DeepEqual(testCase.expectedResult, actualResult) {
- t.Errorf("Test %d: The expected BucketPolicy doesnt match the actual BucketPolicy", i+1)
+ t.Errorf("Test %d: The expected BucketPolicy doesn't match the actual BucketPolicy", i+1)
}
}
}
diff --git a/bucket-notification.go b/bucket-notification.go
new file mode 100644
index 0000000..121a63a
--- /dev/null
+++ b/bucket-notification.go
@@ -0,0 +1,228 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package minio
+
+import (
+ "encoding/xml"
+ "reflect"
+)
+
+// NotificationEventType is an S3 notification event associated with the bucket notification configuration
+type NotificationEventType string
+
+// The role of each event type is described at:
+// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations
+const (
+ ObjectCreatedAll NotificationEventType = "s3:ObjectCreated:*"
+ ObjectCreatePut = "s3:ObjectCreated:Put"
+ ObjectCreatedPost = "s3:ObjectCreated:Post"
+ ObjectCreatedCopy = "s3:ObjectCreated:Copy"
+	ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload"
+ ObjectRemovedAll = "s3:ObjectRemoved:*"
+ ObjectRemovedDelete = "s3:ObjectRemoved:Delete"
+ ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated"
+ ObjectReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject"
+)
+
+// FilterRule - child of S3Key, a tag in the notification xml which
+// carries suffix/prefix filters
+type FilterRule struct {
+ Name string `xml:"Name"`
+ Value string `xml:"Value"`
+}
+
+// S3Key - child of Filter, a tag in the notification xml which
+// carries suffix/prefix filters
+type S3Key struct {
+ FilterRules []FilterRule `xml:"FilterRule,omitempty"`
+}
+
+// Filter - a tag in the notification xml structure which carries
+// suffix/prefix filters
+type Filter struct {
+ S3Key S3Key `xml:"S3Key,omitempty"`
+}
+
+// Arn - holds ARN information that will be sent to the web service,
+// ARN description can be found at http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+type Arn struct {
+ Partition string
+ Service string
+ Region string
+ AccountID string
+ Resource string
+}
+
+// NewArn creates a new ARN based on the given partition, service, region, account ID and resource
+func NewArn(partition, service, region, accountID, resource string) Arn {
+ return Arn{Partition: partition,
+ Service: service,
+ Region: region,
+ AccountID: accountID,
+ Resource: resource}
+}
+
+// String returns the string format of the ARN
+func (arn Arn) String() string {
+ return "arn:" + arn.Partition + ":" + arn.Service + ":" + arn.Region + ":" + arn.AccountID + ":" + arn.Resource
+}
+
+// NotificationConfig - represents one single notification configuration
+// such as topic, queue or lambda configuration.
+type NotificationConfig struct {
+ Id string `xml:"Id,omitempty"`
+ Arn Arn `xml:"-"`
+ Events []NotificationEventType `xml:"Event"`
+ Filter *Filter `xml:"Filter,omitempty"`
+}
+
+// NewNotificationConfig creates one notification config and sets the given ARN
+func NewNotificationConfig(arn Arn) NotificationConfig {
+ return NotificationConfig{Arn: arn}
+}
+
+// AddEvents adds one or more events to the current notification config
+func (t *NotificationConfig) AddEvents(events ...NotificationEventType) {
+ t.Events = append(t.Events, events...)
+}
+
+// AddFilterSuffix sets the suffix filter rule on the current notification config
+func (t *NotificationConfig) AddFilterSuffix(suffix string) {
+ if t.Filter == nil {
+ t.Filter = &Filter{}
+ }
+ newFilterRule := FilterRule{Name: "suffix", Value: suffix}
+ // Replace any suffix rule if existing and add to the list otherwise
+ for index := range t.Filter.S3Key.FilterRules {
+ if t.Filter.S3Key.FilterRules[index].Name == "suffix" {
+ t.Filter.S3Key.FilterRules[index] = newFilterRule
+ return
+ }
+ }
+ t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule)
+}
+
+// AddFilterPrefix sets the prefix filter rule on the current notification config
+func (t *NotificationConfig) AddFilterPrefix(prefix string) {
+ if t.Filter == nil {
+ t.Filter = &Filter{}
+ }
+ newFilterRule := FilterRule{Name: "prefix", Value: prefix}
+ // Replace any prefix rule if existing and add to the list otherwise
+ for index := range t.Filter.S3Key.FilterRules {
+ if t.Filter.S3Key.FilterRules[index].Name == "prefix" {
+ t.Filter.S3Key.FilterRules[index] = newFilterRule
+ return
+ }
+ }
+ t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule)
+}
+
+// TopicConfig carries one single topic notification configuration
+type TopicConfig struct {
+ NotificationConfig
+ Topic string `xml:"Topic"`
+}
+
+// QueueConfig carries one single queue notification configuration
+type QueueConfig struct {
+ NotificationConfig
+ Queue string `xml:"Queue"`
+}
+
+// LambdaConfig carries one single cloudfunction notification configuration
+type LambdaConfig struct {
+ NotificationConfig
+ Lambda string `xml:"CloudFunction"`
+}
+
+// BucketNotification - the struct that represents the whole XML to be sent to the web service
+type BucketNotification struct {
+ XMLName xml.Name `xml:"NotificationConfiguration"`
+ LambdaConfigs []LambdaConfig `xml:"CloudFunctionConfiguration"`
+ TopicConfigs []TopicConfig `xml:"TopicConfiguration"`
+ QueueConfigs []QueueConfig `xml:"QueueConfiguration"`
+}
+
+// AddTopic adds a given topic config to the general bucket notification config
+func (b *BucketNotification) AddTopic(topicConfig NotificationConfig) {
+ newTopicConfig := TopicConfig{NotificationConfig: topicConfig, Topic: topicConfig.Arn.String()}
+ for _, n := range b.TopicConfigs {
+ if reflect.DeepEqual(n, newTopicConfig) {
+ // Avoid adding duplicated entry
+ return
+ }
+ }
+ b.TopicConfigs = append(b.TopicConfigs, newTopicConfig)
+}
+
+// AddQueue adds a given queue config to the general bucket notification config
+func (b *BucketNotification) AddQueue(queueConfig NotificationConfig) {
+ newQueueConfig := QueueConfig{NotificationConfig: queueConfig, Queue: queueConfig.Arn.String()}
+ for _, n := range b.QueueConfigs {
+ if reflect.DeepEqual(n, newQueueConfig) {
+ // Avoid adding duplicated entry
+ return
+ }
+ }
+ b.QueueConfigs = append(b.QueueConfigs, newQueueConfig)
+}
+
+// AddLambda adds a given lambda config to the general bucket notification config
+func (b *BucketNotification) AddLambda(lambdaConfig NotificationConfig) {
+ newLambdaConfig := LambdaConfig{NotificationConfig: lambdaConfig, Lambda: lambdaConfig.Arn.String()}
+ for _, n := range b.LambdaConfigs {
+ if reflect.DeepEqual(n, newLambdaConfig) {
+ // Avoid adding duplicated entry
+ return
+ }
+ }
+ b.LambdaConfigs = append(b.LambdaConfigs, newLambdaConfig)
+}
+
+// RemoveTopicByArn removes all topic configurations that match the exact specified ARN
+func (b *BucketNotification) RemoveTopicByArn(arn Arn) {
+ var topics []TopicConfig
+ for _, topic := range b.TopicConfigs {
+ if topic.Topic != arn.String() {
+ topics = append(topics, topic)
+ }
+ }
+ b.TopicConfigs = topics
+}
+
+// RemoveQueueByArn removes all queue configurations that match the exact specified ARN
+func (b *BucketNotification) RemoveQueueByArn(arn Arn) {
+ var queues []QueueConfig
+ for _, queue := range b.QueueConfigs {
+ if queue.Queue != arn.String() {
+ queues = append(queues, queue)
+ }
+ }
+ b.QueueConfigs = queues
+}
+
+// RemoveLambdaByArn removes all lambda configurations that match the exact specified ARN
+func (b *BucketNotification) RemoveLambdaByArn(arn Arn) {
+ var lambdas []LambdaConfig
+ for _, lambda := range b.LambdaConfigs {
+ if lambda.Lambda != arn.String() {
+ lambdas = append(lambdas, lambda)
+ }
+ }
+ b.LambdaConfigs = lambdas
+}
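
Taken together, the types above compose into the XML payload the service expects. A minimal sketch that builds a queue configuration for ".jpg" objects under "photos/" and prints the resulting document; the ARN components are placeholders:

	package main

	import (
		"encoding/xml"
		"fmt"
		"log"

		"github.com/minio/minio-go"
	)

	func main() {
		// Placeholder SQS ARN: arn:aws:sqs:us-east-1:123456789012:my-queue
		queueArn := minio.NewArn("aws", "sqs", "us-east-1", "123456789012", "my-queue")

		cfg := minio.NewNotificationConfig(queueArn)
		cfg.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll)
		cfg.AddFilterPrefix("photos/")
		cfg.AddFilterSuffix(".jpg")

		// AddQueue deduplicates, so adding the same config twice is harmless.
		var bn minio.BucketNotification
		bn.AddQueue(cfg)

		// Render the <NotificationConfiguration> XML defined by the struct tags above.
		out, err := xml.MarshalIndent(&bn, "", "  ")
		if err != nil {
			log.Fatalln(err)
		}
		fmt.Println(string(out))
	}
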
diff --git a/bucket-policy.go b/bucket-policy.go
deleted file mode 100644
index 0ade362..0000000
--- a/bucket-policy.go
+++ /dev/null
@@ -1,618 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "encoding/json"
- "fmt"
- "sort"
- "strings"
-)
-
-// maximum supported access policy size.
-const maxAccessPolicySize = 20 * 1024 * 1024 // 20MiB.
-
-// Resource prefix for all aws resources.
-const awsResourcePrefix = "arn:aws:s3:::"
-
-// BucketPolicy - Bucket level policy.
-type BucketPolicy string
-
-// Different types of Policies currently supported for buckets.
-const (
- BucketPolicyNone BucketPolicy = "none"
- BucketPolicyReadOnly = "readonly"
- BucketPolicyReadWrite = "readwrite"
- BucketPolicyWriteOnly = "writeonly"
-)
-
-// isValidBucketPolicy - Is provided policy value supported.
-func (p BucketPolicy) isValidBucketPolicy() bool {
- switch p {
- case BucketPolicyNone, BucketPolicyReadOnly, BucketPolicyReadWrite, BucketPolicyWriteOnly:
- return true
- }
- return false
-}
-
-// User - canonical users list.
-type User struct {
- AWS []string
-}
-
-// Statement - minio policy statement
-type Statement struct {
- Sid string
- Effect string
- Principal User `json:"Principal"`
- Actions []string `json:"Action"`
- Resources []string `json:"Resource"`
- Conditions map[string]map[string]string `json:"Condition,omitempty"`
-}
-
-// BucketAccessPolicy - minio policy collection
-type BucketAccessPolicy struct {
-	Version    string // date in YYYY-MM-DD format, e.g. "2012-10-17".
- Statements []Statement `json:"Statement"`
-}
-
-// Read write actions.
-var (
- readWriteBucketActions = []string{
- "s3:GetBucketLocation",
- "s3:ListBucketMultipartUploads",
- // Add more bucket level read-write actions here.
- }
- readWriteObjectActions = []string{
- "s3:AbortMultipartUpload",
- "s3:DeleteObject",
- "s3:GetObject",
- "s3:ListMultipartUploadParts",
- "s3:PutObject",
- // Add more object level read-write actions here.
- }
-)
-
-// Write only actions.
-var (
- writeOnlyBucketActions = []string{
- "s3:GetBucketLocation",
- "s3:ListBucketMultipartUploads",
- // Add more bucket level write actions here.
- }
- writeOnlyObjectActions = []string{
- "s3:AbortMultipartUpload",
- "s3:DeleteObject",
- "s3:ListMultipartUploadParts",
- "s3:PutObject",
- // Add more object level write actions here.
- }
-)
-
-// Read only actions.
-var (
- readOnlyBucketActions = []string{
- "s3:GetBucketLocation",
- // Add more bucket level read actions here.
- }
- readOnlyObjectActions = []string{
- "s3:GetObject",
- // Add more object level read actions here.
- }
-)
-
-// subsetActions returns true if the first array is completely
-// contained in the second array. There must be at least
-// the same number of duplicate values in second as there
-// are in first.
-func subsetActions(first, second []string) bool {
- set := make(map[string]int)
- for _, value := range second {
- set[value]++
- }
- for _, value := range first {
- if count, found := set[value]; !found {
- return false
- } else if count < 1 {
- return false
- } else {
- set[value] = count - 1
- }
- }
- return true
-}
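Containment here is multiset containment: a duplicate in `first` needs its own matching copy in `second`. A quick illustration using only the function above (`fmt` is already imported in this file):

```go
func exampleSubsetActions() {
	// Every element of the first slice is covered by the second.
	fmt.Println(subsetActions(
		[]string{"s3:GetObject"},
		[]string{"s3:GetObject", "s3:PutObject"})) // true
	// The second slice lacks a matching duplicate of "s3:GetObject".
	fmt.Println(subsetActions(
		[]string{"s3:GetObject", "s3:GetObject"},
		[]string{"s3:GetObject"})) // false
}
```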
-
-// Verifies if we have read/write policy set at bucketName, objectPrefix.
-func isBucketPolicyReadWrite(statements []Statement, bucketName string, objectPrefix string) bool {
- var commonActions, readWrite bool
- sort.Strings(readWriteBucketActions)
- sort.Strings(readWriteObjectActions)
- for _, statement := range statements {
- if statement.Principal.AWS[0] != "*" {
- continue
- }
- for _, resource := range statement.Resources {
- if resource == awsResourcePrefix+bucketName {
- if subsetActions(readWriteBucketActions, statement.Actions) {
- commonActions = true
- continue
- }
- } else if resourceMatch(resource, awsResourcePrefix+bucketName+"/"+objectPrefix) {
- if subsetActions(readWriteObjectActions, statement.Actions) {
- readWrite = true
- }
- }
- }
- }
- return commonActions && readWrite
-}
-
-// Verifies if we have write only policy set at bucketName, objectPrefix.
-func isBucketPolicyWriteOnly(statements []Statement, bucketName string, objectPrefix string) bool {
- var commonActions, writeOnly bool
- sort.Strings(writeOnlyBucketActions)
- sort.Strings(writeOnlyObjectActions)
- for _, statement := range statements {
- if statement.Principal.AWS[0] != "*" {
- continue
- }
- for _, resource := range statement.Resources {
- if resource == awsResourcePrefix+bucketName {
- if subsetActions(writeOnlyBucketActions, statement.Actions) {
- commonActions = true
- continue
- }
- } else if resourceMatch(resource, awsResourcePrefix+bucketName+"/"+objectPrefix) {
- if subsetActions(writeOnlyObjectActions, statement.Actions) {
- writeOnly = true
- }
- }
- }
- }
- return commonActions && writeOnly
-}
-
-// Verifies if we have read only policy set at bucketName, objectPrefix.
-func isBucketPolicyReadOnly(statements []Statement, bucketName string, objectPrefix string) bool {
- var commonActions, readOnly bool
- sort.Strings(readOnlyBucketActions)
- sort.Strings(readOnlyObjectActions)
- for _, statement := range statements {
- if statement.Principal.AWS[0] != "*" {
- continue
- }
- for _, resource := range statement.Resources {
- if resource == awsResourcePrefix+bucketName {
- if subsetActions(readOnlyBucketActions, statement.Actions) {
- commonActions = true
- continue
- }
- } else if resourceMatch(resource, awsResourcePrefix+bucketName+"/"+objectPrefix) {
- if subsetActions(readOnlyObjectActions, statement.Actions) {
- readOnly = true
- break
- }
- }
- }
- }
- return commonActions && readOnly
-}
-
-// isAction - returns true if action is found among the list of actions.
-func isAction(action string, actions []string) bool {
- for _, act := range actions {
- if action == act {
- return true
- }
- }
- return false
-}
-
-// removeReadBucketActions - removes readWriteBucket actions if found.
-func removeReadBucketActions(statements []Statement, bucketName string) []Statement {
- var newStatements []Statement
- var bucketActionsRemoved bool
- for _, statement := range statements {
- for _, resource := range statement.Resources {
- if resource == awsResourcePrefix+bucketName && !bucketActionsRemoved {
- var newActions []string
- for _, action := range statement.Actions {
- if isAction(action, readWriteBucketActions) {
- continue
- }
- newActions = append(newActions, action)
- }
- statement.Actions = newActions
- bucketActionsRemoved = true
- }
- }
- if len(statement.Actions) != 0 {
- newStatements = append(newStatements, statement)
- }
- }
- return newStatements
-}
-
-// removeListBucketAction - removes "s3:ListBucket" action if found.
-func removeListBucketAction(statements []Statement, bucketName string) []Statement {
- var newStatements []Statement
- var listBucketActionsRemoved bool
- for _, statement := range statements {
- for _, resource := range statement.Resources {
- if resource == awsResourcePrefix+bucketName && !listBucketActionsRemoved {
- var newActions []string
- for _, action := range statement.Actions {
- if isAction(action, []string{"s3:ListBucket"}) {
- delete(statement.Conditions, "StringEquals")
- continue
- }
- newActions = append(newActions, action)
- }
- statement.Actions = newActions
- listBucketActionsRemoved = true
- }
- }
- if len(statement.Actions) != 0 {
- newStatements = append(newStatements, statement)
- }
- }
- return newStatements
-}
-
-// removeWriteObjectActions - removes writeOnlyObject actions if found.
-func removeWriteObjectActions(statements []Statement, bucketName string, objectPrefix string) []Statement {
- var newStatements []Statement
- for _, statement := range statements {
- for _, resource := range statement.Resources {
- if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
- var newActions []string
- for _, action := range statement.Actions {
- if isAction(action, writeOnlyObjectActions) {
- continue
- }
- newActions = append(newActions, action)
- }
- statement.Actions = newActions
- }
- }
- if len(statement.Actions) != 0 {
- newStatements = append(newStatements, statement)
- }
- }
- return newStatements
-}
-
-// removeReadObjectActions - removes "s3:GetObject" actions if found.
-func removeReadObjectActions(statements []Statement, bucketName string, objectPrefix string) []Statement {
- var newStatements []Statement
- for _, statement := range statements {
- for _, resource := range statement.Resources {
- if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
- var newActions []string
- for _, action := range statement.Actions {
- if isAction(action, []string{"s3:GetObject"}) {
- continue
- }
- newActions = append(newActions, action)
- }
- statement.Actions = newActions
- }
- }
- if len(statement.Actions) != 0 {
- newStatements = append(newStatements, statement)
- }
- }
- return newStatements
-}
-
-// removeReadWriteObjectActions - removes readWriteObject actions if found.
-func removeReadWriteObjectActions(statements []Statement, bucketName string, objectPrefix string) []Statement {
- var newStatements []Statement
- for _, statement := range statements {
- for _, resource := range statement.Resources {
- if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
- var newActions []string
- for _, action := range statement.Actions {
- if isAction(action, readWriteObjectActions) {
- continue
- }
- newActions = append(newActions, action)
- }
- statement.Actions = newActions
- }
- }
- if len(statement.Actions) != 0 {
- newStatements = append(newStatements, statement)
- }
- }
- return newStatements
-}
-
-// Removes read write bucket policy if found.
-func removeBucketPolicyStatementReadWrite(statements []Statement, bucketName string, objectPrefix string) []Statement {
- newStatements := removeReadBucketActions(statements, bucketName)
- newStatements = removeListBucketAction(newStatements, bucketName)
- newStatements = removeReadWriteObjectActions(newStatements, bucketName, objectPrefix)
- return newStatements
-}
-
-// Removes write only bucket policy if found.
-func removeBucketPolicyStatementWriteOnly(statements []Statement, bucketName string, objectPrefix string) []Statement {
- newStatements := removeReadBucketActions(statements, bucketName)
- newStatements = removeWriteObjectActions(newStatements, bucketName, objectPrefix)
- return newStatements
-}
-
-// Removes read only bucket policy if found.
-func removeBucketPolicyStatementReadOnly(statements []Statement, bucketName string, objectPrefix string) []Statement {
- newStatements := removeReadBucketActions(statements, bucketName)
- newStatements = removeListBucketAction(newStatements, bucketName)
- newStatements = removeReadObjectActions(newStatements, bucketName, objectPrefix)
- return newStatements
-}
-
-// Remove bucket policies based on the type.
-func removeBucketPolicyStatement(statements []Statement, bucketName string, objectPrefix string) []Statement {
- // Verify that a policy is defined on the object prefix, otherwise do not remove the policy
- if isPolicyDefinedForObjectPrefix(statements, bucketName, objectPrefix) {
- // Verify type of policy to be removed.
- if isBucketPolicyReadWrite(statements, bucketName, objectPrefix) {
- statements = removeBucketPolicyStatementReadWrite(statements, bucketName, objectPrefix)
- } else if isBucketPolicyWriteOnly(statements, bucketName, objectPrefix) {
- statements = removeBucketPolicyStatementWriteOnly(statements, bucketName, objectPrefix)
- } else if isBucketPolicyReadOnly(statements, bucketName, objectPrefix) {
- statements = removeBucketPolicyStatementReadOnly(statements, bucketName, objectPrefix)
- }
- }
- return statements
-}
-
-// Checks if an access policy is defined for the given object prefix.
-func isPolicyDefinedForObjectPrefix(statements []Statement, bucketName string, objectPrefix string) bool {
- for _, statement := range statements {
- for _, resource := range statement.Resources {
- if resource == awsResourcePrefix+bucketName+"/"+objectPrefix+"*" {
- return true
- }
- }
- }
- return false
-}
-
-// Unmarshals bucket policy byte array into a structured bucket access policy.
-func unMarshalBucketPolicy(bucketPolicyBuf []byte) (BucketAccessPolicy, error) {
- // Untyped lazy JSON struct.
- type bucketAccessPolicyUntyped struct {
- Version string
- Statement []struct {
- Sid string
- Effect string
- Principal struct {
- AWS json.RawMessage
- }
- Action json.RawMessage
- Resource json.RawMessage
- Condition map[string]map[string]string
- }
- }
- var policyUntyped = bucketAccessPolicyUntyped{}
- // Unmarshal incoming policy into an untyped structure, to be
- // evaluated lazily later.
- err := json.Unmarshal(bucketPolicyBuf, &policyUntyped)
- if err != nil {
- return BucketAccessPolicy{}, err
- }
- var policy = BucketAccessPolicy{}
- policy.Version = policyUntyped.Version
- for _, stmtUntyped := range policyUntyped.Statement {
- statement := Statement{}
- // These are properly typed messages.
- statement.Sid = stmtUntyped.Sid
- statement.Effect = stmtUntyped.Effect
- statement.Conditions = stmtUntyped.Condition
-
-		// The AWS principal can be encoded either as a []string or as a
-		// plain string. Try the slice form first and fall back to the
-		// scalar form.
- err = json.Unmarshal(stmtUntyped.Principal.AWS, &statement.Principal.AWS)
- if err != nil {
- var awsUser string
- err = json.Unmarshal(stmtUntyped.Principal.AWS, &awsUser)
- if err != nil {
- return BucketAccessPolicy{}, err
- }
- statement.Principal.AWS = []string{awsUser}
- }
-		// Actions can be encoded either as a []string or as a plain
-		// string. Try the slice form first and fall back to the scalar form.
- err = json.Unmarshal(stmtUntyped.Action, &statement.Actions)
- if err != nil {
- var action string
- err = json.Unmarshal(stmtUntyped.Action, &action)
- if err != nil {
- return BucketAccessPolicy{}, err
- }
- statement.Actions = []string{action}
- }
-		// Resources can be encoded either as a []string or as a plain
-		// string. Try the slice form first and fall back to the scalar form.
- err = json.Unmarshal(stmtUntyped.Resource, &statement.Resources)
- if err != nil {
- var resource string
- err = json.Unmarshal(stmtUntyped.Resource, &resource)
- if err != nil {
- return BucketAccessPolicy{}, err
- }
- statement.Resources = []string{resource}
- }
- // Append the typed policy.
- policy.Statements = append(policy.Statements, statement)
- }
- return policy, nil
-}
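The fallback paths are easiest to see with a policy whose `Principal.AWS`, `Action` and `Resource` fields are plain strings; a small sketch using only the function above:

```go
func exampleUnmarshalScalarFields() (BucketAccessPolicy, error) {
	// All three variant fields are plain strings here; the fallbacks
	// above promote each one to a one-element slice.
	raw := []byte(`{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Effect": "Allow",
	    "Principal": {"AWS": "*"},
	    "Action": "s3:GetObject",
	    "Resource": "arn:aws:s3:::my-bucket/*"
	  }]
	}`)
	return unMarshalBucketPolicy(raw)
}
```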
-
-// Identifies the policy type from policy Statements.
-func identifyPolicyType(policy BucketAccessPolicy, bucketName, objectPrefix string) (bucketPolicy BucketPolicy) {
- if policy.Statements == nil {
- return BucketPolicyNone
- }
- if isBucketPolicyReadWrite(policy.Statements, bucketName, objectPrefix) {
- return BucketPolicyReadWrite
- } else if isBucketPolicyWriteOnly(policy.Statements, bucketName, objectPrefix) {
- return BucketPolicyWriteOnly
- } else if isBucketPolicyReadOnly(policy.Statements, bucketName, objectPrefix) {
- return BucketPolicyReadOnly
- }
- return BucketPolicyNone
-}
-
-// Generate policy statements for various bucket policies.
-// refer to http://docs.aws.amazon.com/AmazonS3/latest/dev/access-policy-language-overview.html
-// for more details about statement fields.
-func generatePolicyStatement(bucketPolicy BucketPolicy, bucketName, objectPrefix string) ([]Statement, error) {
- if !bucketPolicy.isValidBucketPolicy() {
- return []Statement{}, ErrInvalidArgument(fmt.Sprintf("Invalid bucket policy provided. %s", bucketPolicy))
- }
- var statements []Statement
- if bucketPolicy == BucketPolicyNone {
- return []Statement{}, nil
- } else if bucketPolicy == BucketPolicyReadWrite {
- // Get read-write policy.
- statements = setReadWriteStatement(bucketName, objectPrefix)
- } else if bucketPolicy == BucketPolicyReadOnly {
- // Get read only policy.
- statements = setReadOnlyStatement(bucketName, objectPrefix)
- } else if bucketPolicy == BucketPolicyWriteOnly {
- // Return Write only policy.
- statements = setWriteOnlyStatement(bucketName, objectPrefix)
- }
- return statements, nil
-}
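Chaining the generator with the JSON layer yields the wire-format policy document; a minimal sketch (the check against maxAccessPolicySize and the actual upload happen elsewhere in the client):

```go
func examplePolicyJSON() ([]byte, error) {
	statements, err := generatePolicyStatement(BucketPolicyReadOnly, "my-bucket", "Asia/")
	if err != nil {
		return nil, err
	}
	// Wrap the generated statements in the standard policy envelope.
	policy := BucketAccessPolicy{
		Version:    "2012-10-17",
		Statements: statements,
	}
	return json.Marshal(policy)
}
```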
-
-// Obtain statements for read-write BucketPolicy.
-func setReadWriteStatement(bucketName, objectPrefix string) []Statement {
- bucketResourceStatement := Statement{}
- bucketResourceStatement.Effect = "Allow"
- bucketResourceStatement.Principal.AWS = []string{"*"}
- bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
- bucketResourceStatement.Actions = readWriteBucketActions
-
- bucketListResourceStatement := Statement{}
- bucketListResourceStatement.Effect = "Allow"
- bucketListResourceStatement.Principal.AWS = []string{"*"}
- bucketListResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
- bucketListResourceStatement.Actions = []string{"s3:ListBucket"}
-	// If an object prefix is present, set the conditions for s3:ListBucket.
- if objectPrefix != "" {
- bucketListResourceStatement.Conditions = map[string]map[string]string{
- "StringEquals": {
- "s3:prefix": objectPrefix,
- },
- }
- }
- objectResourceStatement := Statement{}
- objectResourceStatement.Effect = "Allow"
- objectResourceStatement.Principal.AWS = []string{"*"}
- objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
- objectResourceStatement.Actions = readWriteObjectActions
- // Save the read write policy.
- statements := []Statement{}
- statements = append(statements, bucketResourceStatement, bucketListResourceStatement, objectResourceStatement)
- return statements
-}
-
-// Obtain statements for read only BucketPolicy.
-func setReadOnlyStatement(bucketName, objectPrefix string) []Statement {
- bucketResourceStatement := Statement{}
- bucketResourceStatement.Effect = "Allow"
- bucketResourceStatement.Principal.AWS = []string{"*"}
- bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
- bucketResourceStatement.Actions = readOnlyBucketActions
-
- bucketListResourceStatement := Statement{}
- bucketListResourceStatement.Effect = "Allow"
- bucketListResourceStatement.Principal.AWS = []string{"*"}
- bucketListResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
- bucketListResourceStatement.Actions = []string{"s3:ListBucket"}
-	// If an object prefix is present, set the conditions for s3:ListBucket.
- if objectPrefix != "" {
- bucketListResourceStatement.Conditions = map[string]map[string]string{
- "StringEquals": {
- "s3:prefix": objectPrefix,
- },
- }
- }
- objectResourceStatement := Statement{}
- objectResourceStatement.Effect = "Allow"
- objectResourceStatement.Principal.AWS = []string{"*"}
- objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
- objectResourceStatement.Actions = readOnlyObjectActions
-
- statements := []Statement{}
-
- // Save the read only policy.
- statements = append(statements, bucketResourceStatement, bucketListResourceStatement, objectResourceStatement)
- return statements
-}
-
-// Obtain statements for write only BucketPolicy.
-func setWriteOnlyStatement(bucketName, objectPrefix string) []Statement {
- bucketResourceStatement := Statement{}
- objectResourceStatement := Statement{}
- statements := []Statement{}
- // Write only policy.
- bucketResourceStatement.Effect = "Allow"
- bucketResourceStatement.Principal.AWS = []string{"*"}
- bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
- bucketResourceStatement.Actions = writeOnlyBucketActions
- objectResourceStatement.Effect = "Allow"
- objectResourceStatement.Principal.AWS = []string{"*"}
- objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
- objectResourceStatement.Actions = writeOnlyObjectActions
- // Save the write only policy.
- statements = append(statements, bucketResourceStatement, objectResourceStatement)
- return statements
-}
-
-// resourceMatch - matches wildcards in 'pattern' against the given resource.
-func resourceMatch(pattern, resource string) bool {
- if pattern == "" {
- return resource == pattern
- }
- if pattern == "*" {
- return true
- }
- parts := strings.Split(pattern, "*")
- if len(parts) == 1 {
- return resource == pattern
- }
- tGlob := strings.HasSuffix(pattern, "*")
- end := len(parts) - 1
- if !strings.HasPrefix(resource, parts[0]) {
- return false
- }
- for i := 1; i < end; i++ {
- if !strings.Contains(resource, parts[i]) {
- return false
- }
- idx := strings.Index(resource, parts[i]) + len(parts[i])
- resource = resource[idx:]
- }
- return tGlob || strings.HasSuffix(resource, parts[end])
-}
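Concretely, the matcher treats '*' as a glob over the flat object namespace; a few calls against the function above:

```go
func exampleResourceMatch() {
	// Trailing glob matches everything under the bucket.
	fmt.Println(resourceMatch("arn:aws:s3:::my-bucket/*", "arn:aws:s3:::my-bucket/a/b.txt")) // true
	// "output" does not start with the "oo" prefix required by the pattern.
	fmt.Println(resourceMatch("arn:aws:s3:::my-bucket/oo*", "arn:aws:s3:::my-bucket/output")) // false
	fmt.Println(resourceMatch("arn:aws:s3:::my-bucket/oo*", "arn:aws:s3:::my-bucket/ootput")) // true
}
```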
diff --git a/bucket-policy_test.go b/bucket-policy_test.go
deleted file mode 100644
index f683679..0000000
--- a/bucket-policy_test.go
+++ /dev/null
@@ -1,645 +0,0 @@
-/*
- * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package minio
-
-import (
- "encoding/json"
- "fmt"
- "reflect"
- "testing"
-)
-
-// Validates bucket policy string.
-func TestIsValidBucketPolicy(t *testing.T) {
- testCases := []struct {
- inputPolicy BucketPolicy
- expectedResult bool
- }{
- // valid inputs.
- {BucketPolicy("none"), true},
- {BucketPolicy("readonly"), true},
- {BucketPolicy("readwrite"), true},
- {BucketPolicy("writeonly"), true},
- // invalid input.
- {BucketPolicy("readwriteonly"), false},
- {BucketPolicy("writeread"), false},
- }
-
- for i, testCase := range testCases {
- actualResult := testCase.inputPolicy.isValidBucketPolicy()
- if testCase.expectedResult != actualResult {
-			t.Errorf("Test %d: Expected isValidBucketPolicy to be '%v' for policy \"%s\", but instead found it to be '%v'", i+1, testCase.expectedResult, testCase.inputPolicy, actualResult)
- }
- }
-}
-
-// Tests whether the first array is completely contained in the second array.
-func TestSubsetActions(t *testing.T) {
- testCases := []struct {
- firstArray []string
- secondArray []string
-
- expectedResult bool
- }{
- {[]string{"aaa", "bbb"}, []string{"ccc", "bbb"}, false},
- {[]string{"aaa", "bbb"}, []string{"aaa", "ccc"}, false},
- {[]string{"aaa", "bbb"}, []string{"aaa", "bbb"}, true},
- {[]string{"aaa", "bbb"}, []string{"aaa", "bbb", "ccc"}, true},
- {[]string{"aaa", "bbb", "aaa"}, []string{"aaa", "bbb", "ccc"}, false},
- {[]string{"aaa", "bbb", "aaa"}, []string{"aaa", "bbb", "bbb", "aaa"}, true},
- {[]string{"aaa", "bbb", "aaa"}, []string{"aaa", "bbb"}, false},
- {[]string{"aaa", "bbb", "aaa"}, []string{"aaa", "bbb", "aaa", "bbb", "ccc"}, true},
- }
- for i, testCase := range testCases {
- actualResult := subsetActions(testCase.firstArray, testCase.secondArray)
- if testCase.expectedResult != actualResult {
-			t.Errorf("Test %d: Expected subsetActions to return '%v' for first array '%v' and second array '%v'", i+1, testCase.expectedResult, testCase.firstArray, testCase.secondArray)
- }
- }
-
-}
-
-// Tests validate Bucket Policy type identifier.
-func TestIdentifyPolicyType(t *testing.T) {
- testCases := []struct {
- inputPolicy BucketAccessPolicy
- bucketName string
- objName string
-
- expectedPolicy BucketPolicy
- }{
- {BucketAccessPolicy{Version: "2012-10-17"}, "my-bucket", "", BucketPolicyNone},
- }
- for i, testCase := range testCases {
- actualBucketPolicy := identifyPolicyType(testCase.inputPolicy, testCase.bucketName, testCase.objName)
- if testCase.expectedPolicy != actualBucketPolicy {
- t.Errorf("Test %d: Expected bucket policy to be '%v', but instead got '%v'", i+1, testCase.expectedPolicy, actualBucketPolicy)
- }
- }
-}
-
-// Test validate Resource Statement Generator.
-func TestGeneratePolicyStatement(t *testing.T) {
-
- testCases := []struct {
- bucketPolicy BucketPolicy
- bucketName string
- objectPrefix string
- expectedStatements []Statement
-
- shouldPass bool
- err error
- }{
- {BucketPolicy("my-policy"), "my-bucket", "", []Statement{}, false, ErrInvalidArgument(fmt.Sprintf("Invalid bucket policy provided. %s", BucketPolicy("my-policy")))},
- {BucketPolicyNone, "my-bucket", "", []Statement{}, true, nil},
- {BucketPolicyReadOnly, "read-only-bucket", "", setReadOnlyStatement("read-only-bucket", ""), true, nil},
- {BucketPolicyWriteOnly, "write-only-bucket", "", setWriteOnlyStatement("write-only-bucket", ""), true, nil},
- {BucketPolicyReadWrite, "read-write-bucket", "", setReadWriteStatement("read-write-bucket", ""), true, nil},
- }
- for i, testCase := range testCases {
- actualStatements, err := generatePolicyStatement(testCase.bucketPolicy, testCase.bucketName, testCase.objectPrefix)
-
- if err != nil && testCase.shouldPass {
- t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
- }
-
- if err == nil && !testCase.shouldPass {
- t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
- }
- // Failed as expected, but does it fail for the expected reason.
- if err != nil && !testCase.shouldPass {
- if err.Error() != testCase.err.Error() {
-				t.Errorf("Test %d: Expected to fail with error \"%s\", but failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
- }
- }
- // Test passes as expected, but the output values are verified for correctness here.
- if err == nil && testCase.shouldPass {
- if !reflect.DeepEqual(testCase.expectedStatements, actualStatements) {
-				t.Errorf("Test %d: The expected statements from the resource statement generator don't match the actual statements", i+1)
- }
- }
- }
-}
-
-// Tests validating read only statement generator.
-func TestsetReadOnlyStatement(t *testing.T) {
-
- expectedReadOnlyStatement := func(bucketName, objectPrefix string) []Statement {
- bucketResourceStatement := &Statement{}
- bucketListResourceStatement := &Statement{}
- objectResourceStatement := &Statement{}
- statements := []Statement{}
-
- bucketResourceStatement.Effect = "Allow"
- bucketResourceStatement.Principal.AWS = []string{"*"}
- bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
- bucketResourceStatement.Actions = readOnlyBucketActions
- bucketListResourceStatement.Effect = "Allow"
- bucketListResourceStatement.Principal.AWS = []string{"*"}
- bucketListResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
- bucketListResourceStatement.Actions = []string{"s3:ListBucket"}
- if objectPrefix != "" {
- bucketListResourceStatement.Conditions = map[string]map[string]string{
- "StringEquals": {
- "s3:prefix": objectPrefix,
- },
- }
- }
- objectResourceStatement.Effect = "Allow"
- objectResourceStatement.Principal.AWS = []string{"*"}
- objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
- objectResourceStatement.Actions = readOnlyObjectActions
- // Save the read only policy.
- statements = append(statements, *bucketResourceStatement, *bucketListResourceStatement, *objectResourceStatement)
- return statements
- }
-
- testCases := []struct {
- // inputs.
- bucketName string
- objectPrefix string
- // expected result.
- expectedStatements []Statement
- }{
- {"my-bucket", "", expectedReadOnlyStatement("my-bucket", "")},
- {"my-bucket", "Asia/", expectedReadOnlyStatement("my-bucket", "Asia/")},
- {"my-bucket", "Asia/India", expectedReadOnlyStatement("my-bucket", "Asia/India")},
- }
-
- for i, testCase := range testCases {
-		actualStatements := setReadOnlyStatement(testCase.bucketName, testCase.objectPrefix)
-		if !reflect.DeepEqual(testCase.expectedStatements, actualStatements) {
-			t.Errorf("Test %d: The expected statements from the resource statement generator don't match the actual statements", i+1)
- }
- }
-}
-
-// Tests validating write only statement generator.
-func TestsetWriteOnlyStatement(t *testing.T) {
-
- expectedWriteOnlyStatement := func(bucketName, objectPrefix string) []Statement {
- bucketResourceStatement := &Statement{}
- objectResourceStatement := &Statement{}
- statements := []Statement{}
- // Write only policy.
- bucketResourceStatement.Effect = "Allow"
- bucketResourceStatement.Principal.AWS = []string{"*"}
- bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
- bucketResourceStatement.Actions = writeOnlyBucketActions
- objectResourceStatement.Effect = "Allow"
- objectResourceStatement.Principal.AWS = []string{"*"}
- objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
- objectResourceStatement.Actions = writeOnlyObjectActions
- // Save the write only policy.
- statements = append(statements, *bucketResourceStatement, *objectResourceStatement)
- return statements
- }
- testCases := []struct {
- // inputs.
- bucketName string
- objectPrefix string
- // expected result.
- expectedStatements []Statement
- }{
- {"my-bucket", "", expectedWriteOnlyStatement("my-bucket", "")},
- {"my-bucket", "Asia/", expectedWriteOnlyStatement("my-bucket", "Asia/")},
- {"my-bucket", "Asia/India", expectedWriteOnlyStatement("my-bucket", "Asia/India")},
- }
-
- for i, testCase := range testCases {
-		actualStatements := setWriteOnlyStatement(testCase.bucketName, testCase.objectPrefix)
-		if !reflect.DeepEqual(testCase.expectedStatements, actualStatements) {
-			t.Errorf("Test %d: The expected statements from the resource statement generator don't match the actual statements", i+1)
- }
- }
-}
-
-// Tests validating read-write statement generator.
-func TestsetReadWriteStatement(t *testing.T) {
- // Obtain statements for read-write BucketPolicy.
- expectedReadWriteStatement := func(bucketName, objectPrefix string) []Statement {
- bucketResourceStatement := &Statement{}
- bucketListResourceStatement := &Statement{}
- objectResourceStatement := &Statement{}
- statements := []Statement{}
-
- bucketResourceStatement.Effect = "Allow"
- bucketResourceStatement.Principal.AWS = []string{"*"}
- bucketResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
- bucketResourceStatement.Actions = readWriteBucketActions
- bucketListResourceStatement.Effect = "Allow"
- bucketListResourceStatement.Principal.AWS = []string{"*"}
- bucketListResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName)}
- bucketListResourceStatement.Actions = []string{"s3:ListBucket"}
- if objectPrefix != "" {
- bucketListResourceStatement.Conditions = map[string]map[string]string{
- "StringEquals": {
- "s3:prefix": objectPrefix,
- },
- }
- }
- objectResourceStatement.Effect = "Allow"
- objectResourceStatement.Principal.AWS = []string{"*"}
- objectResourceStatement.Resources = []string{fmt.Sprintf("%s%s", awsResourcePrefix, bucketName+"/"+objectPrefix+"*")}
- objectResourceStatement.Actions = readWriteObjectActions
- // Save the read write policy.
- statements = append(statements, *bucketResourceStatement, *bucketListResourceStatement, *objectResourceStatement)
- return statements
- }
-
- testCases := []struct {
- // inputs.
- bucketName string
- objectPrefix string
- // expected result.
- expectedStatements []Statement
- }{
- {"my-bucket", "", expectedReadWriteStatement("my-bucket", "")},
- {"my-bucket", "Asia/", expectedReadWriteStatement("my-bucket", "Asia/")},
- {"my-bucket", "Asia/India", expectedReadWriteStatement("my-bucket", "Asia/India")},
- }
-
- for i, testCase := range testCases {
-		actualStatements := setReadWriteStatement(testCase.bucketName, testCase.objectPrefix)
-		if !reflect.DeepEqual(testCase.expectedStatements, actualStatements) {
-			t.Errorf("Test %d: The expected statements from the resource statement generator don't match the actual statements", i+1)
- }
- }
-}
-
-// Tests validate Unmarshalling of BucketAccessPolicy.
-func TestUnMarshalBucketPolicy(t *testing.T) {
-
-	bucketAccessPolicies := []BucketAccessPolicy{
- {Version: "1.0"},
- {Version: "1.0", Statements: setReadOnlyStatement("minio-bucket", "")},
- {Version: "1.0", Statements: setReadWriteStatement("minio-bucket", "Asia/")},
- {Version: "1.0", Statements: setWriteOnlyStatement("minio-bucket", "Asia/India/")},
- }
-
- testCases := []struct {
- inputPolicy BucketAccessPolicy
- // expected results.
- expectedPolicy BucketAccessPolicy
- err error
- // Flag indicating whether the test should pass.
- shouldPass bool
- }{
-		{bucketAccessPolicies[0], bucketAccessPolicies[0], nil, true},
-		{bucketAccessPolicies[1], bucketAccessPolicies[1], nil, true},
-		{bucketAccessPolicies[2], bucketAccessPolicies[2], nil, true},
-		{bucketAccessPolicies[3], bucketAccessPolicies[3], nil, true},
- }
- for i, testCase := range testCases {
- inputPolicyBytes, e := json.Marshal(testCase.inputPolicy)
- if e != nil {
- t.Fatalf("Test %d: Couldn't Marshal bucket policy", i+1)
- }
- actualAccessPolicy, err := unMarshalBucketPolicy(inputPolicyBytes)
- if err != nil && testCase.shouldPass {
- t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
- }
-
- if err == nil && !testCase.shouldPass {
- t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
- }
- // Failed as expected, but does it fail for the expected reason.
- if err != nil && !testCase.shouldPass {
- if err.Error() != testCase.err.Error() {
-				t.Errorf("Test %d: Expected to fail with error \"%s\", but failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
- }
- }
- // Test passes as expected, but the output values are verified for correctness here.
- if err == nil && testCase.shouldPass {
- if !reflect.DeepEqual(testCase.expectedPolicy, actualAccessPolicy) {
-				t.Errorf("Test %d: The expected policy doesn't match the unmarshalled policy", i+1)
- }
- }
- }
-}
-
-// Statement.Action, Statement.Resource and Statement.Principal.AWS may each be a plain string rather than a slice.
-// Set these fields to plain strings and verify that unMarshalBucketPolicy still handles them.
-func TestUnMarshalBucketPolicyUntyped(t *testing.T) {
- obtainRaw := func(v interface{}, t *testing.T) []byte {
- rawData, err := json.Marshal(v)
- if err != nil {
- t.Fatal(err)
- }
- return rawData
- }
-
- type untypedStatement struct {
- Sid string
- Effect string
- Principal struct {
- AWS json.RawMessage
- }
- Action json.RawMessage
- Resource json.RawMessage
- Condition map[string]map[string]string
- }
-
- type bucketAccessPolicyUntyped struct {
- Version string
- Statement []untypedStatement
- }
-
- statements := setReadOnlyStatement("my-bucket", "Asia/")
- expectedBucketPolicy := BucketAccessPolicy{Statements: statements}
- accessPolicyUntyped := bucketAccessPolicyUntyped{}
- accessPolicyUntyped.Statement = make([]untypedStatement, len(statements))
-
- accessPolicyUntyped.Statement[0].Effect = statements[0].Effect
- accessPolicyUntyped.Statement[0].Principal.AWS = obtainRaw(statements[0].Principal.AWS[0], t)
- accessPolicyUntyped.Statement[0].Action = obtainRaw(statements[0].Actions, t)
- accessPolicyUntyped.Statement[0].Resource = obtainRaw(statements[0].Resources, t)
-
- accessPolicyUntyped.Statement[1].Effect = statements[1].Effect
- accessPolicyUntyped.Statement[1].Principal.AWS = obtainRaw(statements[1].Principal.AWS[0], t)
- accessPolicyUntyped.Statement[1].Action = obtainRaw(statements[1].Actions, t)
- accessPolicyUntyped.Statement[1].Resource = obtainRaw(statements[1].Resources, t)
- accessPolicyUntyped.Statement[1].Condition = statements[1].Conditions
-
-	// Set the values as plain strings.
- accessPolicyUntyped.Statement[2].Effect = statements[2].Effect
- accessPolicyUntyped.Statement[2].Principal.AWS = obtainRaw(statements[2].Principal.AWS[0], t)
- accessPolicyUntyped.Statement[2].Action = obtainRaw(statements[2].Actions[0], t)
- accessPolicyUntyped.Statement[2].Resource = obtainRaw(statements[2].Resources[0], t)
-
- inputPolicyBytes := obtainRaw(accessPolicyUntyped, t)
- actualAccessPolicy, err := unMarshalBucketPolicy(inputPolicyBytes)
- if err != nil {
- t.Fatal("Unmarshalling bucket policy from untyped statements failed")
- }
- if !reflect.DeepEqual(expectedBucketPolicy, actualAccessPolicy) {
- t.Errorf("Expected BucketPolicy after unmarshalling untyped statements doesn't match the actual one")
- }
-}
-
-// Tests validate whether access policy is defined for the given object prefix
-func TestIsPolicyDefinedForObjectPrefix(t *testing.T) {
- testCases := []struct {
- bucketName string
- objectPrefix string
- inputStatements []Statement
- expectedResult bool
- }{
- {"my-bucket", "abc/", setReadOnlyStatement("my-bucket", "abc/"), true},
- {"my-bucket", "abc/", setReadOnlyStatement("my-bucket", "ab/"), false},
- {"my-bucket", "abc/", setReadOnlyStatement("my-bucket", "abcde"), false},
- {"my-bucket", "abc/", setReadOnlyStatement("my-bucket", "abc/de"), false},
- {"my-bucket", "abc", setReadOnlyStatement("my-bucket", "abc"), true},
- {"bucket", "", setReadOnlyStatement("bucket", "abc/"), false},
- }
- for i, testCase := range testCases {
- actualResult := isPolicyDefinedForObjectPrefix(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
- if actualResult != testCase.expectedResult {
-			t.Errorf("Test %d: Expected isPolicyDefinedForObjectPrefix to be '%v', but instead found '%v'", i+1, testCase.expectedResult, actualResult)
- }
- }
-}
-
-// Tests validate removal of policy statement from the list of statements.
-func TestRemoveBucketPolicyStatement(t *testing.T) {
- var emptyStatement []Statement
- testCases := []struct {
- bucketName string
- objectPrefix string
- inputStatements []Statement
- expectedStatements []Statement
- }{
- {"my-bucket", "", nil, emptyStatement},
- {"read-only-bucket", "", setReadOnlyStatement("read-only-bucket", ""), emptyStatement},
- {"write-only-bucket", "", setWriteOnlyStatement("write-only-bucket", ""), emptyStatement},
- {"read-write-bucket", "", setReadWriteStatement("read-write-bucket", ""), emptyStatement},
- {"my-bucket", "abcd", setReadOnlyStatement("my-bucket", "abc"), setReadOnlyStatement("my-bucket", "abc")},
- {"my-bucket", "abc/de", setReadOnlyStatement("my-bucket", "abc/"), setReadOnlyStatement("my-bucket", "abc/")},
- {"my-bucket", "abcd", setWriteOnlyStatement("my-bucket", "abc"), setWriteOnlyStatement("my-bucket", "abc")},
- {"my-bucket", "abc/de", setWriteOnlyStatement("my-bucket", "abc/"), setWriteOnlyStatement("my-bucket", "abc/")},
- {"my-bucket", "abcd", setReadWriteStatement("my-bucket", "abc"), setReadWriteStatement("my-bucket", "abc")},
- {"my-bucket", "abc/de", setReadWriteStatement("my-bucket", "abc/"), setReadWriteStatement("my-bucket", "abc/")},
- }
- for i, testCase := range testCases {
- actualStatements := removeBucketPolicyStatement(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
- if !reflect.DeepEqual(testCase.expectedStatements, actualStatements) {
-			t.Errorf("Test %d: The expected statements don't match the actual statements after removal", i+1)
- }
- }
-}
-
-// Tests validate removing of read only bucket statement.
-func TestRemoveBucketPolicyStatementReadOnly(t *testing.T) {
- var emptyStatement []Statement
- testCases := []struct {
- bucketName string
- objectPrefix string
- inputStatements []Statement
- expectedStatements []Statement
- }{
- {"my-bucket", "", []Statement{}, emptyStatement},
- {"read-only-bucket", "", setReadOnlyStatement("read-only-bucket", ""), emptyStatement},
- {"read-only-bucket", "abc/", setReadOnlyStatement("read-only-bucket", "abc/"), emptyStatement},
- {"my-bucket", "abc/", append(setReadOnlyStatement("my-bucket", "abc/"), setReadOnlyStatement("my-bucket", "def/")...), setReadOnlyStatement("my-bucket", "def/")},
- }
- for i, testCase := range testCases {
- actualStatements := removeBucketPolicyStatementReadOnly(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
- if !reflect.DeepEqual(testCase.expectedStatements, actualStatements) {
-			t.Errorf("Test %d: The expected policy statements don't match the actual ones", i+1)
- }
- }
-}
-
-// Tests validate removing of write only bucket statement.
-func TestRemoveBucketPolicyStatementWriteOnly(t *testing.T) {
- var emptyStatement []Statement
- testCases := []struct {
- bucketName string
- objectPrefix string
- inputStatements []Statement
- expectedStatements []Statement
- }{
- {"my-bucket", "", []Statement{}, emptyStatement},
- {"write-only-bucket", "", setWriteOnlyStatement("write-only-bucket", ""), emptyStatement},
- {"write-only-bucket", "abc/", setWriteOnlyStatement("write-only-bucket", "abc/"), emptyStatement},
- {"my-bucket", "abc/", append(setWriteOnlyStatement("my-bucket", "abc/"), setWriteOnlyStatement("my-bucket", "def/")...), setWriteOnlyStatement("my-bucket", "def/")},
- }
- for i, testCase := range testCases {
- actualStatements := removeBucketPolicyStatementWriteOnly(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
- if !reflect.DeepEqual(testCase.expectedStatements, actualStatements) {
-			t.Errorf("Test %d: The expected policy statements don't match the actual ones", i+1)
- }
- }
-}
-
-// Tests validate removing of read-write bucket statement.
-func TestRemoveBucketPolicyStatementReadWrite(t *testing.T) {
- var emptyStatement []Statement
- testCases := []struct {
- bucketName string
- objectPrefix string
- inputStatements []Statement
- expectedStatements []Statement
- }{
- {"my-bucket", "", []Statement{}, emptyStatement},
- {"read-write-bucket", "", setReadWriteStatement("read-write-bucket", ""), emptyStatement},
- {"read-write-bucket", "abc/", setReadWriteStatement("read-write-bucket", "abc/"), emptyStatement},
- {"my-bucket", "abc/", append(setReadWriteStatement("my-bucket", "abc/"), setReadWriteStatement("my-bucket", "def/")...), setReadWriteStatement("my-bucket", "def/")},
- }
- for i, testCase := range testCases {
- actualStatements := removeBucketPolicyStatementReadWrite(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
- if !reflect.DeepEqual(testCase.expectedStatements, actualStatements) {
-			t.Errorf("Test %d: The expected policy statements don't match the actual ones", i+1)
- }
- }
-}
-
-// Tests validate Bucket policy resource matcher.
-func TestBucketPolicyResourceMatch(t *testing.T) {
-
-	// generates a statement with the given resource.
- generateStatement := func(resource string) Statement {
- statement := Statement{}
- statement.Resources = []string{resource}
- return statement
- }
-
-	// generates a resource string for the given bucket and object name.
- generateResource := func(bucketName, objectName string) string {
- return awsResourcePrefix + bucketName + "/" + objectName
- }
-
- testCases := []struct {
- resourceToMatch string
- statement Statement
- expectedResourceMatch bool
- }{
- // Test case 1-4.
- // Policy with resource ending with bucket/* allows access to all objects inside the given bucket.
- {generateResource("minio-bucket", ""), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/*")), true},
- {generateResource("minio-bucket", ""), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/*")), true},
- {generateResource("minio-bucket", ""), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/*")), true},
- {generateResource("minio-bucket", ""), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/*")), true},
- // Test case - 5.
- // Policy with resource ending with bucket/oo* should not allow access to bucket/output.txt.
- {generateResource("minio-bucket", "output.txt"), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/oo*")), false},
- // Test case - 6.
- // Policy with resource ending with bucket/oo* should allow access to bucket/ootput.txt.
- {generateResource("minio-bucket", "ootput.txt"), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/oo*")), true},
- // Test case - 7.
- // Policy with resource ending with bucket/oo* allows access to all subfolders starting with "oo" inside given bucket.
- {generateResource("minio-bucket", "oop-bucket/my-file"), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/oo*")), true},
- // Test case - 8.
- {generateResource("minio-bucket", "Asia/India/1.pjg"), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/Asia/Japan/*")), false},
- // Test case - 9.
- {generateResource("minio-bucket", "Asia/India/1.pjg"), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/Asia/Japan/*")), false},
- // Test case - 10.
- // Proves that the name space is flat.
- {generateResource("minio-bucket", "Africa/Bihar/India/design_info.doc/Bihar"), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix,
- "minio-bucket"+"/*/India/*/Bihar")), true},
- // Test case - 11.
- // Proves that the name space is flat.
- {generateResource("minio-bucket", "Asia/China/India/States/Bihar/output.txt"), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix,
- "minio-bucket"+"/*/India/*/Bihar/*")), true},
- }
- for i, testCase := range testCases {
- actualResourceMatch := resourceMatch(testCase.statement.Resources[0], testCase.resourceToMatch)
- if testCase.expectedResourceMatch != actualResourceMatch {
- t.Errorf("Test %d: Expected Resource match to be `%v`, but instead found it to be `%v`", i+1, testCase.expectedResourceMatch, actualResourceMatch)
- }
- }
-}
-
-// Tests validate whether the bucket policy is read only.
-func TestIsBucketPolicyReadOnly(t *testing.T) {
- testCases := []struct {
- bucketName string
- objectPrefix string
- inputStatements []Statement
- // expected result.
- expectedResult bool
- }{
- {"my-bucket", "", []Statement{}, false},
- {"read-only-bucket", "", setReadOnlyStatement("read-only-bucket", ""), true},
- {"write-only-bucket", "", setWriteOnlyStatement("write-only-bucket", ""), false},
- {"read-write-bucket", "", setReadWriteStatement("read-write-bucket", ""), true},
- {"my-bucket", "abc", setReadOnlyStatement("my-bucket", ""), true},
- {"my-bucket", "abc", setReadOnlyStatement("my-bucket", "abc"), true},
- {"my-bucket", "abcde", setReadOnlyStatement("my-bucket", "abc"), true},
- {"my-bucket", "abc/d", setReadOnlyStatement("my-bucket", "abc/"), true},
- {"my-bucket", "abc", setWriteOnlyStatement("my-bucket", ""), false},
- }
- for i, testCase := range testCases {
- actualResult := isBucketPolicyReadOnly(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
- if testCase.expectedResult != actualResult {
-			t.Errorf("Test %d: Expected isBucketPolicyReadOnly to be '%v', but instead found '%v'", i+1, testCase.expectedResult, actualResult)
- }
- }
-}
-
-// Tests validate whether the bucket policy is read-write.
-func TestIsBucketPolicyReadWrite(t *testing.T) {
- testCases := []struct {
- bucketName string
- objectPrefix string
- inputStatements []Statement
- // expected result.
- expectedResult bool
- }{
- {"my-bucket", "", []Statement{}, false},
- {"read-only-bucket", "", setReadOnlyStatement("read-only-bucket", ""), false},
- {"write-only-bucket", "", setWriteOnlyStatement("write-only-bucket", ""), false},
- {"read-write-bucket", "", setReadWriteStatement("read-write-bucket", ""), true},
- {"my-bucket", "abc", setReadWriteStatement("my-bucket", ""), true},
- {"my-bucket", "abc", setReadWriteStatement("my-bucket", "abc"), true},
- {"my-bucket", "abcde", setReadWriteStatement("my-bucket", "abc"), true},
- {"my-bucket", "abc/d", setReadWriteStatement("my-bucket", "abc/"), true},
- }
- for i, testCase := range testCases {
- actualResult := isBucketPolicyReadWrite(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
- if testCase.expectedResult != actualResult {
-			t.Errorf("Test %d: Expected isBucketPolicyReadWrite to be '%v', but instead found '%v'", i+1, testCase.expectedResult, actualResult)
- }
- }
-}
-
-// Tests validate whether the bucket policy is write only.
-func TestIsBucketPolicyWriteOnly(t *testing.T) {
- testCases := []struct {
- bucketName string
- objectPrefix string
- inputStatements []Statement
- // expected result.
- expectedResult bool
- }{
- {"my-bucket", "", []Statement{}, false},
- {"read-only-bucket", "", setReadOnlyStatement("read-only-bucket", ""), false},
- {"write-only-bucket", "", setWriteOnlyStatement("write-only-bucket", ""), true},
- {"read-write-bucket", "", setReadWriteStatement("read-write-bucket", ""), true},
- {"my-bucket", "abc", setWriteOnlyStatement("my-bucket", ""), true},
- {"my-bucket", "abc", setWriteOnlyStatement("my-bucket", "abc"), true},
- {"my-bucket", "abcde", setWriteOnlyStatement("my-bucket", "abc"), true},
- {"my-bucket", "abc/d", setWriteOnlyStatement("my-bucket", "abc/"), true},
- {"my-bucket", "abc", setReadOnlyStatement("my-bucket", ""), false},
- }
- for i, testCase := range testCases {
- actualResult := isBucketPolicyWriteOnly(testCase.inputStatements, testCase.bucketName, testCase.objectPrefix)
- if testCase.expectedResult != actualResult {
-			t.Errorf("Test %d: Expected isBucketPolicyWriteOnly to be '%v', but instead found '%v'", i+1, testCase.expectedResult, actualResult)
- }
- }
-}
diff --git a/constants.go b/constants.go
index b0aa009..779ed8c 100644
--- a/constants.go
+++ b/constants.go
@@ -18,9 +18,9 @@ package minio
/// Multipart upload defaults.
-// minPartSize - minimum part size 5MiB per object after which
+// minPartSize - minimum part size 64MiB per object after which
// putObject behaves internally as multipart.
-const minPartSize = 1024 * 1024 * 5
+const minPartSize = 1024 * 1024 * 64
// maxPartsCount - maximum number of parts for a single multipart session.
const maxPartsCount = 10000
@@ -40,3 +40,7 @@ const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5
// optimalReadBufferSize - optimal 5MiB buffer used for reading
// through the Read operation.
const optimalReadBufferSize = 1024 * 1024 * 5
+
+// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
+// we don't want to sign the request payload
+const unsignedPayload = "UNSIGNED-PAYLOAD"
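Loosely, this constant is the value a signer places in the payload-hash header when the body is deliberately left unsigned. An illustrative sketch, not the library's actual signing path (assumes a net/http import):

```go
// Sketch only: opting an HTTP request out of payload signing.
func markPayloadUnsigned(req *http.Request) {
	req.Header.Set("X-Amz-Content-Sha256", unsignedPayload)
}
```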
diff --git a/copy-conditions.go b/copy-conditions.go
index 9dd63f6..5dcdfae 100644
--- a/copy-conditions.go
+++ b/copy-conditions.go
@@ -49,7 +49,7 @@ func NewCopyConditions() CopyConditions {
}
// SetMatchETag - set match etag.
-func (c CopyConditions) SetMatchETag(etag string) error {
+func (c *CopyConditions) SetMatchETag(etag string) error {
if etag == "" {
return ErrInvalidArgument("ETag cannot be empty.")
}
@@ -61,7 +61,7 @@ func (c CopyConditions) SetMatchETag(etag string) error {
}
// SetMatchETagExcept - set match etag except.
-func (c CopyConditions) SetMatchETagExcept(etag string) error {
+func (c *CopyConditions) SetMatchETagExcept(etag string) error {
if etag == "" {
return ErrInvalidArgument("ETag cannot be empty.")
}
@@ -73,7 +73,7 @@ func (c CopyConditions) SetMatchETagExcept(etag string) error {
}
// SetUnmodified - set unmodified time since.
-func (c CopyConditions) SetUnmodified(modTime time.Time) error {
+func (c *CopyConditions) SetUnmodified(modTime time.Time) error {
if modTime.IsZero() {
return ErrInvalidArgument("Modified since cannot be empty.")
}
@@ -85,7 +85,7 @@ func (c CopyConditions) SetUnmodified(modTime time.Time) error {
}
// SetModified - set modified time since.
-func (c CopyConditions) SetModified(modTime time.Time) error {
+func (c *CopyConditions) SetModified(modTime time.Time) error {
if modTime.IsZero() {
return ErrInvalidArgument("Modified since cannot be empty.")
}
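The switch to pointer receivers is what makes these setters effective: with value receivers, each append mutated a throwaway copy of the conditions. A usage sketch (the etag value is a placeholder):

```go
func exampleCopyConditions() (CopyConditions, error) {
	conds := NewCopyConditions()
	// With the old value receiver this append was silently lost;
	// the pointer receiver makes it visible to the caller.
	if err := conds.SetMatchETag("d41d8cd98f00b204e9800998ecf8427e"); err != nil {
		return conds, err
	}
	return conds, nil
}
```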
diff --git a/API.md b/docs/API.md
index 0554e90..9977c5d 100644
--- a/API.md
+++ b/docs/API.md
@@ -1,8 +1,11 @@
-# Golang Client API Reference
-Initialize Minio Client object.
-### 1. Minio
+# Golang Client API Reference [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/Minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+
+## Initialize Minio Client object.
+
+## Minio
```go
+
package main
import (
@@ -22,10 +25,13 @@ func main() {
return
}
}
+
```
-### 2. AWS S3
+
+## AWS S3
```go
+
package main
import (
@@ -45,25 +51,30 @@ func main() {
return
}
}
+
```
-| Bucket operations |Object operations | Presigned operations | Bucket Policy Operations |
+
+| Bucket operations |Object operations | Presigned operations | Bucket Policy/Notification Operations |
|:---|:---|:---|:---|
|[`MakeBucket`](#MakeBucket) |[`GetObject`](#GetObject) | [`PresignedGetObject`](#PresignedGetObject) |[`SetBucketPolicy`](#SetBucketPolicy) |
|[`ListBuckets`](#ListBuckets) |[`PutObject`](#PutObject) |[`PresignedPutObject`](#PresignedPutObject) | [`GetBucketPolicy`](#GetBucketPolicy) |
-|[`BucketExists`](#BucketExists) |[`CopyObject`](#CopyObject) |[`PresignedPostPolicy`](#PresignedPostPolicy) | |
-| [`RemoveBucket`](#RemoveBucket) |[`StatObject`](#StatObject) | | |
-|[`ListObjects`](#ListObjects) |[`RemoveObject`](#RemoveObject) | | |
-|[`ListObjectsV2`](#ListObjectsV2) | [`RemoveIncompleteUpload`](#RemoveIncompleteUpload) | | |
-|[`ListIncompleteUploads`](#ListIncompleteUploads) |[`FPutObject`](#FPutObject) | | |
+|[`BucketExists`](#BucketExists) |[`CopyObject`](#CopyObject) |[`PresignedPostPolicy`](#PresignedPostPolicy) | [`ListBucketPolicies`](#ListBucketPolicies) |
+| [`RemoveBucket`](#RemoveBucket) |[`StatObject`](#StatObject) | | [`SetBucketNotification`](#SetBucketNotification) |
+|[`ListObjects`](#ListObjects) |[`RemoveObject`](#RemoveObject) | | [`GetBucketNotification`](#GetBucketNotification) |
+|[`ListObjectsV2`](#ListObjectsV2) | [`RemoveObjects`](#RemoveObjects) | | [`RemoveAllBucketNotification`](#RemoveAllBucketNotification) |
+|[`ListIncompleteUploads`](#ListIncompleteUploads) | [`RemoveIncompleteUpload`](#RemoveIncompleteUpload) | | [`ListenBucketNotification`](#ListenBucketNotification) |
+| | [`FPutObject`](#FPutObject) | | |
| | [`FGetObject`](#FGetObject) | | |
## 1. Constructor
-<a name="Minio">
-#### New(endpoint string, accessKeyID string, secretAccessKey string, ssl bool) (*Client, error)
+<a name="Minio"></a>
+
+### New(endpoint string, accessKeyID string, secretAccessKey string, ssl bool) (*Client, error)
Initializes a new client object.
__Parameters__
+
|Param |Type |Description |
|:---|:---| :---|
|`endpoint` | _string_ |S3 object storage endpoint. |
@@ -71,83 +82,66 @@ __Parameters__
| `secretAccessKey` | _string_ |Secret key for the object storage endpoint. |
|`ssl` | _bool_ | Set this value to 'true' to enable secure (HTTPS) access. |
+
## 2. Bucket operations
-<a name="MakeBucket">
-#### MakeBucket(bucketName string, location string) error
+<a name="MakeBucket"></a>
+### MakeBucket(bucketName string, location string) error
Creates a new bucket.
+
__Parameters__
-<table>
- <thead>
- <tr>
- <th>Param</th>
- <th>Type</th>
- <th>Description</th>
- </tr>
- </thead>
- <tbody>
- <tr>
- <td>bucketName</td>
- <td>string </td>
- <td>name of the bucket</td>
- </tr>
- <tr>
- <td>
- location
- </td>
- <td> string</td>
- <td> Default value is _us-east-1_ <br/>
-
-Region valid values are: [ _us-west-1_, _us-west-2_, _eu-west-1_, _eu-central-1_, _ap-southeast-1_, _ap-northeast-1_, _ap-southeast-2_, _sa-east-1_ ].
- </td>
- </tr>
- </tbody>
-</table>
+| Param | Type | Description |
+|---|---|---|
+|`bucketName` | _string_ | Name of the bucket. |
+| `location` | _string_ | Region where the bucket is created. Default value is us-east-1. Valid values are listed below: |
+| | |us-east-1 |
+| | |us-west-1 |
+| | |us-west-2 |
+| | |eu-west-1 |
+| | | eu-central-1|
+| | | ap-southeast-1|
+| | | ap-northeast-1|
+| | | ap-southeast-2|
+| | | sa-east-1|
+
__Example__
+
```go
+
err := minioClient.MakeBucket("mybucket", "us-east-1")
if err != nil {
fmt.Println(err)
return
}
fmt.Println("Successfully created mybucket.")
+
```
-<a name="ListBuckets">
-#### ListBuckets() ([]BucketInfo, error)
+<a name="ListBuckets"></a>
+### ListBuckets() ([]BucketInfo, error)
+
Lists all buckets.
-<table>
- <thead>
- <tr>
- <th>Param</th>
- <th>Type</th>
- <th>Description</th>
- </tr>
- </thead>
- <tbody>
- <tr>
- <td>
- bucketList
- </td>
- <td> []BucketInfo </td>
- <td>
- <ul>Lists bucket in following format:
- <li>bucket.Name string: bucket name.</li>
- <li>bucket.CreationDate time.Time : date when bucket was created.</li>
- </ul>
- </td>
- </tr>
- </tbody>
-</table>
+| Param | Type | Description |
+|---|---|---|
+|`bucketList` | _[]BucketInfo_ | Lists buckets in the format shown below: |
+
+
+| Param | Type | Description |
+|---|---|---|
+|`bucket.Name` | _string_ | bucket name. |
+|`bucket.CreationDate` | _time.Time_ | date when bucket was created. |
+
__Example__
+
```go
+
buckets, err := minioClient.ListBuckets()
if err != nil {
fmt.Println(err)
@@ -156,54 +150,79 @@ if err != nil {
for _, bucket := range buckets {
fmt.Println(bucket)
}
+
```
-<a name="BucketExists">
-#### BucketExists(bucketName string) error
+<a name="BucketExists"></a>
+### BucketExists(bucketName string) (found bool, err error)
+
Checks if a bucket exists.
__Parameters__
+
|Param |Type |Description |
|:---|:---| :---|
|`bucketName` | _string_ |name of the bucket. |
+
+__Return Values__
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`found` | _bool_ | indicates whether bucket exists or not |
+|`err` | _error_ | standard error |
+
+
__Example__
+
```go
-err := minioClient.BucketExists("mybucket")
+
+found, err := minioClient.BucketExists("mybucket")
if err != nil {
fmt.Println(err)
return
}
+if found {
+ fmt.Println("Bucket found")
+}
+
```
-<a name="RemoveBucket">
-#### RemoveBucket(bucketName string)
+<a name="RemoveBucket"></a>
+### RemoveBucket(bucketName string) error
+
Removes a bucket.
__Parameters__
+
|Param |Type |Description |
|:---|:---| :---|
|`bucketName` | _string_ |name of the bucket. |
__Example__
+
```go
+
err := minioClient.RemoveBucket("mybucket")
if err != nil {
fmt.Println(err)
return
}
+
```
-<a name="ListObjects">
-#### ListObjects(bucketName string, prefix string, recursive bool, doneCh chan struct{}) <-chan ObjectInfo
+<a name="ListObjects"></a>
+### ListObjects(bucketName string, prefix string, recursive bool, doneCh chan struct{}) <-chan ObjectInfo
+
Lists objects in a bucket.
__Parameters__
+
|Param |Type |Description |
|:---|:---| :---|
|`bucketName` | _string_ |name of the bucket. |
@@ -211,35 +230,23 @@ __Parameters__
| `recursive` | _bool_ |`true` indicates recursive style listing and `false` indicates directory style listing delimited by '/'. |
|`doneCh` | _chan struct{}_ | A message on this channel ends the ListObjects loop. |
+
__Return Value__
-<table>
- <thead>
- <tr>
- <th>Param</th>
- <th>Type</th>
- <th>Description</th>
- </tr>
- </thead>
- <tbody>
- <tr>
- <td>
- chan ObjectInfo
- </td>
- <td> chan ObjectInfo </td>
- <td>
- <ul>Read channel for all the objects in the bucket, the object is of the format:
- <li>objectInfo.Key string: name of the object.</li>
- <li>objectInfo.Size int64: size of the object.</li>
- <li>objectInfo.ETag string: etag of the object. </li>
- <li>objectInfo.LastModified time.Time: modified time stamp.</li>
- </ul>
- </td>
- </tr>
- </tbody>
-</table>
+|Param |Type |Description |
+|:---|:---| :---|
+|`chan ObjectInfo` | _chan ObjectInfo_ |Read channel for all objects in the bucket; each object has the format listed below: |
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`objectInfo.Key` | _string_ |name of the object. |
+|`objectInfo.Size` | _int64_ |size of the object. |
+|`objectInfo.ETag` | _string_ |etag of the object. |
+|`objectInfo.LastModified` | _time.Time_ |modified time stamp. |
+
```go
+
// Create a done channel to control 'ListObjects' go routine.
doneCh := make(chan struct{})
@@ -255,14 +262,18 @@ for object := range objectCh {
}
fmt.Println(object)
}
+
```
-<a name="ListObjectsV2">
-#### ListObjectsV2(bucketName string, prefix string, recursive bool, doneCh chan struct{}) <-chan ObjectInfo
+
+<a name="ListObjectsV2"></a>
+### ListObjectsV2(bucketName string, prefix string, recursive bool, doneCh chan struct{}) <-chan ObjectInfo
+
Lists objects in a bucket using the recommended listing API v2.
__Parameters__
+
|Param |Type |Description |
|:---|:---| :---|
|`bucketName` | _string_ |name of the bucket. |
@@ -270,35 +281,23 @@ __Parameters__
| `recursive` | _bool_ |`true` indicates recursive style listing and `false` indicates directory style listing delimited by '/'. |
|`doneCh` | _chan struct{}_ | A message on this channel ends the ListObjectsV2 loop. |
+
__Return Value__
-<table>
- <thead>
- <tr>
- <th>Param</th>
- <th>Type</th>
- <th>Description</th>
- </tr>
- </thead>
- <tbody>
- <tr>
- <td>
- chan ObjectInfo
- </td>
- <td> chan ObjectInfo </td>
- <td>
- <ul>Read channel for all the objects in the bucket, the object is of the format:
- <li>objectInfo.Key string: name of the object.</li>
- <li>objectInfo.Size int64: size of the object.</li>
- <li>objectInfo.ETag string: etag of the object. </li>
- <li>objectInfo.LastModified time.Time: modified time stamp.</li>
- </ul>
- </td>
- </tr>
- </tbody>
-</table>
+|Param |Type |Description |
+|:---|:---| :---|
+|`chan ObjectInfo` | _chan ObjectInfo_ |Read channel for all objects in the bucket; each object has the format listed below: |
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`objectInfo.Key` | _string_ |name of the object. |
+|`objectInfo.Size` | _int64_ |size of the object. |
+|`objectInfo.ETag` | _string_ |etag of the object. |
+|`objectInfo.LastModified` | _time.Time_ |modified time stamp. |
+
```go
+
// Create a done channel to control 'ListObjectsV2' go routine.
doneCh := make(chan struct{})
@@ -314,16 +313,18 @@ for object := range objectCh {
}
fmt.Println(object)
}
-```
+```
+<a name="ListIncompleteUploads"></a>
+### ListIncompleteUploads(bucketName string, prefix string, recursive bool, doneCh chan struct{}) <-chan ObjectMultipartInfo
-<a name="ListIncompleteUploads">
-#### ListIncompleteUploads(bucketName string, prefix string, recursive bool, doneCh chan struct{}) <- chan ObjectMultipartInfo
Lists partially uploaded objects in a bucket.
+
__Parameters__
+
|Param |Type |Description |
|:---|:---| :---|
|`bucketName` | _string_ |name of the bucket. |
@@ -331,36 +332,26 @@ __Parameters__
| `recursive` | _bool_ |`true` indicates recursive style listing and `false` indicates directory style listing delimited by '/'. |
|`doneCh` | _chan struct{}_ | A message on this channel ends the ListIncompleteUploads loop. |
+
+__Return Value__
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`chan ObjectMultipartInfo` | _chan ObjectMultipartInfo_ |emits multipart objects of the format listed below: |
+
-<table>
- <thead>
- <tr>
- <th>Param</th>
- <th>Type</th>
- <th>Description</th>
- </tr>
- </thead>
- <tbody>
- <tr>
- <td>
- chan ObjectMultipartInfo
- </td>
- <td> chan ObjectMultipartInfo </td>
- <td>
- <ul>emits multipart objects of the format:
- <li>multiPartObjInfo.Key string: name of the incomplete object.</li>
- <li>multiPartObjInfo.UploadID string: upload ID of the incomplete object.</li>
- <li>multiPartObjInfo.Size int64: size of the incompletely uploaded object.</li>
- </ul>
- </td>
- </tr>
- </tbody>
-</table>
+|Param |Type |Description |
+|:---|:---| :---|
+|`multiPartObjInfo.Key` | _string_ |name of the incomplete object. |
+|`multiPartObjInfo.UploadID` | _string_ |upload ID of the incomplete object.|
+|`multiPartObjInfo.Size` | _int64_ |size of the incompletely uploaded object.|
__Example__
+
```go
+
// Create a done channel to control 'ListIncompleteUploads' go routine.
doneCh := make(chan struct{})
@@ -376,29 +367,39 @@ for multiPartObject := range multiPartObjectCh {
}
fmt.Println(multiPartObject)
}
+
```
## 3. Object operations
-<a name="GetObject">
-#### GetObject(bucketName string, objectName string) (*Object, error)
+
+<a name="GetObject"></a>
+### GetObject(bucketName string, objectName string) (*Object, error)
+
Downloads an object.
+
__Parameters__
+
|Param |Type |Description |
|:---|:---| :---|
|`bucketName` | _string_ |name of the bucket. |
|`objectName` | _string_ |name of the object. |
+
__Return Value__
+
|Param |Type |Description |
|:---|:---| :---|
|`object` | _*minio.Object_ |_minio.Object_ represents an object reader. |
+
__Example__
+
```go
+
object, err := minioClient.GetObject("mybucket", "photo.jpg")
if err != nil {
fmt.Println(err)
@@ -413,11 +414,14 @@ if _, err = io.Copy(localFile, object); err != nil {
fmt.Println(err)
return
}
+
```
-<a name="FGetObject">
-#### FGetObject(bucketName string, objectName string, filePath string) error
+
+<a name="FGetObject"></a>
+### FGetObject(bucketName string, objectName string, filePath string) error
Downloads and saves the object as a file in the local filesystem.
+
__Parameters__
@@ -427,22 +431,29 @@ __Parameters__
|`objectName` | _string_ |name of the object. |
|`filePath` | _string_ |path to which the object data will be written. |
+
__Example__
+
```go
+
err := minioClient.FGetObject("mybucket", "photo.jpg", "/tmp/photo.jpg")
if err != nil {
fmt.Println(err)
return
}
+
```
-<a name="PutObject">
-#### PutObject(bucketName string, objectName string, reader io.Reader, contentType string) (n int, err error)
+<a name="PutObject"></a>
+### PutObject(bucketName string, objectName string, reader io.Reader, contentType string) (n int64, err error)
+
Uploads an object.
+
__Parameters__
+
|Param |Type |Description |
|:---|:---| :---|
|`bucketName` | _string_ |name of the bucket. |
@@ -450,13 +461,17 @@ __Parameters__
|`reader` | _io.Reader_ |Any golang object implementing io.Reader. |
|`contentType` | _string_ |content type of the object. |
+
__Example__
+
Uploads objects that are less than 5MiB in a single PUT operation. For objects that are greater than 5MiB in size, PutObject seamlessly uploads the object in chunks of 5MiB or more depending on the actual file size. The max upload size for an object is 5TB.
In the event that PutObject fails to upload an object, the user may attempt to re-upload the same object. If the same object is being uploaded, PutObject API examines the previous partial attempt to upload this object and resumes automatically from where it left off.
+
```go
+
file, err := os.Open("my-testfile")
if err != nil {
fmt.Println(err)
@@ -469,24 +484,32 @@ if err != nil {
fmt.Println(err)
return
}
+
```
-<a name="CopyObject">
-#### CopyObject(bucketName string, objectName string, objectSource string, conditions CopyConditions) error
+
+<a name="CopyObject"></a>
+### CopyObject(bucketName string, objectName string, objectSource string, conditions CopyConditions) error
+
+Copies a source object into a new object with the provided name in the provided bucket.
+
__Parameters__
+
|Param |Type |Description |
|:---|:---| :---|
|`bucketName` | _string_ |name of the bucket. |
|`objectName` | _string_ |name of the object. |
|`objectSource` | _string_ |name of the object source. |
-|`conditions` | _CopyConditions_ |Collection of supported CopyObject conditions. ['x-amz-copy-source', 'x-amz-copy-source-if-match', 'x-amz-copy-source-if-none-match', 'x-amz-copy-source-if-unmodified-since', 'x-amz-copy-source-if-modified-since'].|
+|`conditions` | _CopyConditions_ |Collection of supported CopyObject conditions. [`x-amz-copy-source`, `x-amz-copy-source-if-match`, `x-amz-copy-source-if-none-match`, `x-amz-copy-source-if-unmodified-since`, `x-amz-copy-source-if-modified-since`].|
+
__Example__
+
```go
+
// All following conditions are allowed and can be combined together.
// Set copy conditions.
@@ -508,14 +531,18 @@ if err != nil {
fmt.Println(err)
return
}
+
```
-<a name="FPutObject">
-#### FPutObject(bucketName string, objectName string, filePath string, contentType string) error
+<a name="FPutObject"></a>
+### FPutObject(bucketName string, objectName string, filePath string, contentType string) (n int64, err error)
+
Uploads contents from a file to objectName.
+
__Parameters__
+
|Param |Type |Description |
|:---|:---| :---|
|`bucketName` | _string_ |name of the bucket. |
@@ -523,95 +550,133 @@ __Parameters__
|`filePath` | _string_ |file path of the file to be uploaded. |
|`contentType` | _string_ |content type of the object. |
+
__Example__
+
FPutObject uploads objects that are less than 5MiB in a single PUT operation. For objects that are greater than 5MiB in size, FPutObject seamlessly uploads the object in chunks of 5MiB or more depending on the actual file size. The max upload size for an object is 5TB.
In the event that FPutObject fails to upload an object, the user may attempt to re-upload the same object. If the same object is being uploaded, FPutObject API examines the previous partial attempt to upload this object and resumes automatically from where it left off.
```go
+
n, err := minioClient.FPutObject("mybucket", "myobject.csv", "/tmp/otherobject.csv", "application/csv")
if err != nil {
fmt.Println(err)
return
}
+
```
-<a name="StatObject">
-#### StatObject(bucketName string, objectName string) (ObjectInfo, error)
+<a name="StatObject"></a>
+### StatObject(bucketName string, objectName string) (ObjectInfo, error)
+
Gets metadata of an object.
+
__Parameters__
+
|Param |Type |Description |
|:---|:---| :---|
|`bucketName` | _string_ |name of the bucket. |
|`objectName` | _string_ |name of the object. |
+
__Return Value__
-<table>
- <thead>
- <tr>
- <th>Param</th>
- <th>Type</th>
- <th>Description</th>
- </tr>
- </thead>
- <tbody>
- <tr>
- <td>
- objInfo
- </td>
- <td> ObjectInfo</td>
- <td>
- <ul>object stat info for following format:
- <li>objInfo.Size int64: size of the object.</li>
- <li>objInfo.ETag string: etag of the object.</li>
- <li>objInfo.ContentType string: Content-Type of the object.</li>
- <li>objInfo.LastModified time.Time: modified time stamp</li>
- </ul>
- </td>
- </tr>
- </tbody>
-</table>
+|Param |Type |Description |
+|:---|:---| :---|
+|`objInfo` | _ObjectInfo_ |object stat info in the format listed below: |
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`objInfo.LastModified` | _time.Time_ |modified time stamp. |
+|`objInfo.ETag` | _string_ |etag of the object.|
+|`objInfo.ContentType` | _string_ |Content-Type of the object.|
+|`objInfo.Size` | _int64_ |size of the object.|
+
__Example__
+
```go
+
objInfo, err := minioClient.StatObject("mybucket", "photo.jpg")
if err != nil {
fmt.Println(err)
return
}
fmt.Println(objInfo)
+
```
-<a name="RemoveObject">
-#### RemoveObject(bucketName string, objectName string) error
+<a name="RemoveObject"></a>
+### RemoveObject(bucketName string, objectName string) error
+
Removes an object.
+
__Parameters__
+
|Param |Type |Description |
|:---|:---| :---|
|`bucketName` | _string_ |name of the bucket. |
|`objectName` | _string_ |name of the object. |
+
+__Example__
+
+```go
+
err := minioClient.RemoveObject("mybucket", "photo.jpg")
if err != nil {
fmt.Println(err)
return
}
+
+```
+<a name="RemoveObjects"></a>
+### RemoveObjects(bucketName string, objectsCh chan string) (errorCh chan minio.RemoveObjectError)
+
+Removes a list of objects obtained from an input channel. The call internally buffers up to 1000 object names
+at a time and initiates a delete request to the server. Any error encountered is sent through the error channel.
+
+__Parameters__
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |name of the bucket. |
+|`objectsCh` | _chan string_ | Channel of object names to be removed. |
+
+
+__Return Values__
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`errorCh` | _chan minio.RemoveObjectError_ | Read channel for object deletion errors. |
+
+
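+__Example__
+
+`objectsCh` must be populated and closed by the caller. A minimal sketch with placeholder object names:
+
+```go
+
+objectsCh := make(chan string)
+
+// Send the object names to be removed and close the channel when done.
+go func() {
+	defer close(objectsCh)
+	for _, name := range []string{"photos/1.jpg", "photos/2.jpg"} {
+		objectsCh <- name
+	}
+}()
+
+```
+
+`RemoveObjects` then consumes the channel and reports any failures on `errorCh`: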
+
+```go
+
+errorCh := minioClient.RemoveObjects("mybucket", objectsCh)
+for e := range errorCh {
+ fmt.Println("Error detected during deletion: " + e.Err.Error())
+}
+
```
-<a name="RemoveIncompleteUpload">
-#### RemoveIncompleteUpload(bucketName string, objectName string) error
+
+
+<a name="RemoveIncompleteUpload"></a>
+### RemoveIncompleteUpload(bucketName string, objectName string) error
+
Removes a partially uploaded object.
__Parameters__
+
|Param |Type |Description |
|:---|:---| :---|
|`bucketName` | _string_ |name of the bucket. |
@@ -619,22 +684,28 @@ __Parameters__
__Example__
+
```go
+
err := minioClient.RemoveIncompleteUpload("mybucket", "photo.jpg")
if err != nil {
fmt.Println(err)
return
}
+
```
## 4. Presigned operations
-<a name="PresignedGetObject">
-#### PresignedGetObject(bucketName string, objectName string, expiry time.Duration, reqParams url.Values) (*url.URL, error)
+
+<a name="PresignedGetObject"></a>
+### PresignedGetObject(bucketName string, objectName string, expiry time.Duration, reqParams url.Values) (*url.URL, error)
+
Generates a presigned URL for HTTP GET operations. Browsers/Mobile clients may point to this URL to directly download objects even if the bucket is private. This presigned URL can have an associated expiration time in seconds after which it is no longer operational. The default expiry is set to 7 days.
__Parameters__
+
|Param |Type |Description |
|:---|:---| :---|
|`bucketName` | _string_ |name of the bucket. |
@@ -642,9 +713,12 @@ __Parameters__
|`expiry` | _time.Duration_ |expiry in seconds. |
|`reqParams` | _url.Values_ |additional response header overrides supports _response-expires_, _response-content-type_, _response-cache-control_, _response-content-disposition_. |
+
__Example__
+
```go
+
// Set request parameters for content-disposition.
reqParams := make(url.Values)
reqParams.Set("response-content-disposition", "attachment; filename=\"your-filename.txt\"")
@@ -655,26 +729,33 @@ if err != nil {
fmt.Println(err)
return
}
+
```
-<a name="PresignedPutObject">
-#### PresignedPutObject(bucketName string, objectName string, expiry time.Duration) (*url.URL, error)
+<a name="PresignedPutObject"></a>
+### PresignedPutObject(bucketName string, objectName string, expiry time.Duration) (*url.URL, error)
+
Generates a presigned URL for HTTP PUT operations. Browsers/Mobile clients may point to this URL to upload objects directly to a bucket even if it is private. This presigned URL can have an associated expiration time in seconds after which it is no longer operational. The default expiry is set to 7 days.
NOTE: you can upload to S3 only with the specified object name.
+
__Parameters__
+
|Param |Type |Description |
|:---|:---| :---|
|`bucketName` | _string_ |name of the bucket. |
|`objectName` | _string_ |name of the object. |
|`expiry` | _time.Duration_ |expiry in seconds. |
+
__Example__
+
```go
+
// Generates a url which expires in a day.
expiry := time.Second * 24 * 60 * 60 // 1 day.
presignedURL, err := minioClient.PresignedPutObject("mybucket", "myobject", expiry)
@@ -683,20 +764,28 @@ if err != nil {
return
}
fmt.Println(presignedURL)
+
```
-<a name="PresignedPostPolicy">
-#### PresignedPostPolicy(PostPolicy) (*url.URL, map[string]string, error)
+<a name="PresignedPostPolicy"></a>
+### PresignedPostPolicy(PostPolicy) (*url.URL, map[string]string, error)
+
+Allows setting policy conditions on a presigned URL for POST operations. Conditions such as the bucket name receiving object uploads, key name prefixes, and expiry policy may be set.
Create policy:
+
```go
+
policy := minio.NewPostPolicy()
+
```
+
Apply upload policy restrictions:
+
```go
+
policy.SetBucket("mybucket")
policy.SetKey("myobject")
policy.SetExpires(time.Now().UTC().AddDate(0, 0, 10)) // expires in 10 days
@@ -714,10 +803,13 @@ if err != nil {
fmt.Println(err)
return
}
+
```
+
POST your content from the command line using `curl`:
+
```go
fmt.Printf("curl ")
for k, v := range formData {
@@ -727,72 +819,60 @@ fmt.Printf("-F file=@/etc/bash.bashrc ")
fmt.Printf("%s\n", url)
```
-## 5. Bucket policy operations
+## 5. Bucket policy/notification operations
+
+<a name="SetBucketPolicy"></a>
+### SetBucketPolicy(bucketname string, objectPrefix string, policy policy.BucketPolicy) error
-<a name="SetBucketPolicy">
-#### SetBucketPolicy(bucketname string, objectPrefix string, policy BucketPolicy) error
Set access permissions on a bucket or an object prefix.
+The `github.com/minio/minio-go/pkg/policy` package must be imported.
+
__Parameters__
-<table>
- <thead>
- <tr>
- <th>Param</th>
- <th>Type</th>
- <th>Description</th>
- </tr>
- </thead>
- <tbody>
- <tr>
- <td>bucketName</td>
- <td>string</td>
- <td>name of the bucket</td>
- </tr>
- <tr>
- <td>objectPrefix</td>
- <td>string</td>
- <td>name of the object prefix</td>
- </tr>
- <tr>
- <td>
- policy
- </td>
- <td> BucketPolicy</td>
- <td>
- <ul>policy can be <br/>
- <li>BucketPolicyNone,</li>
- <li>BucketPolicyReadOnly,</li>
- <li>BucketPolicyReadWrite,</li>
- <li>BucketPolicyWriteOnly</li>
- </ul>
- </td>
- </tr>
- </tbody>
-</table>
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |name of the bucket.|
+|`objectPrefix` | _string_ |name of the object prefix.|
+|`policy` | _policy.BucketPolicy_ |policy can be:|
+|| |policy.BucketPolicyNone|
+| | |policy.BucketPolicyReadOnly|
+|| |policy.BucketPolicyReadWrite|
+| | |policy.BucketPolicyWriteOnly|
+
__Return Values__
+
|Param |Type |Description |
|:---|:---| :---|
|`err` | _error_ |standard error |
+
__Example__
+
```go
-err := minioClient.SetBucketPolicy("mybucket", "myprefix", BucketPolicyReadWrite)
+
+err := minioClient.SetBucketPolicy("mybucket", "myprefix", policy.BucketPolicyReadWrite)
if err != nil {
fmt.Println(err)
return
}
+
```
-<a name="GetBucketPolicy">
-#### GetBucketPolicy(bucketName string, objectPrefix string) (BucketPolicy, error)
+<a name="GetBucketPolicy"></a>
+### GetBucketPolicy(bucketName string, objectPrefix string) (policy.BucketPolicy, error)
+
Get access permissions on a bucket or a prefix.
+The `github.com/minio/minio-go/pkg/policy` package must be imported.
+
__Parameters__
+
|Param |Type |Description |
|:---|:---| :---|
|`bucketName` | _string_ |name of the bucket. |
@@ -800,40 +880,222 @@ __Parameters__
__Return Values__
+
|Param |Type |Description |
|:---|:---| :---|
-|`bucketPolicy` | _BucketPolicy_ |string that contains: 'none', 'readonly', 'readwrite', or 'writeonly |
+|`bucketPolicy` | _policy.BucketPolicy_ |string that contains: `none`, `readonly`, `readwrite`, or `writeonly` |
|`err` | _error_ |standard error |
__Example__
+
```go
+
bucketPolicy, err := minioClient.GetBucketPolicy("mybucket", "")
if err != nil {
fmt.Println(err)
return
}
fmt.Println("Access permissions for mybucket is", bucketPolicy)
+
```
-## 6. Explore Further
-- [Build your own Go Music Player App example](/docs/go-music-player-app)
+<a name="ListBucketPolicies"></a>
+### ListBucketPolicies(bucketName string, objectPrefix string) (map[string]BucketPolicy, error)
+
+Get the access permission rules associated with the specified bucket and prefix.
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |name of the bucket. |
+|`objectPrefix` | _string_ |name of the object prefix |
+
+__Return Values__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketPolicies` | _map[string]BucketPolicy_ |map that contains object resource paths with their associated permissions |
+|`err` | _error_ |standard error |
+
+__Example__
+
+
+```go
+
+bucketPolicies, err := minioClient.ListBucketPolicies("mybucket", "")
+if err != nil {
+ fmt.Println(err)
+ return
+}
+for resource, permission := range bucketPolicies {
+ fmt.Println(resource, " => ", permission)
+}
+
+```
+
+<a name="GetBucketNotification"></a>
+### GetBucketNotification(bucketName string) (BucketNotification, error)
+
+Get all notification configurations related to the specified bucket.
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |name of the bucket. |
+
+__Return Values__
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketNotification` | _BucketNotification_ |structure which holds all notification configurations|
+|`err` | _error_ |standard error |
+
+__Example__
+
+```go
+
+bucketNotification, err := minioClient.GetBucketNotification("mybucket")
+if err != nil {
+	fmt.Println(err)
+	return
+}
+for _, topicConfig := range bucketNotification.TopicConfigs {
+	for _, e := range topicConfig.Events {
+		fmt.Println(e + " event is enabled")
+	}
+}
+```
+<a name="SetBucketNotification"></a>
+### SetBucketNotification(bucketName string, bucketNotification BucketNotification) error
+
+Set a new bucket notification on a bucket.
+
+__Parameters__
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |name of the bucket. |
+|`bucketNotification` | _BucketNotification_ |bucket notification. |
+
+__Return Values__
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`err` | _error_ |standard error |
+
+__Example__
+
+```go
+
+topicArn := minio.NewArn("aws", "sns", "us-east-1", "804605494417", "PhotoUpdate")
+
+topicConfig := minio.NewNotificationConfig(topicArn)
+topicConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll)
+topicConfig.AddFilterPrefix("photos/")
+topicConfig.AddFilterSuffix(".jpg")
+
+bucketNotification := minio.BucketNotification{}
+bucketNotification.AddTopic(topicConfig)
+err := minioClient.SetBucketNotification("mybucket", bucketNotification)
+if err != nil {
+	fmt.Println("Unable to set the bucket notification: " + err.Error())
+}
+```
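+
+Queue and lambda notification targets can be added the same way; a short sketch reusing `bucketNotification` above, with a placeholder ARN:
+
+```go
+
+queueArn := minio.NewArn("aws", "sqs", "us-east-1", "804605494417", "PhotoEventsQueue")
+queueConfig := minio.NewNotificationConfig(queueArn)
+queueConfig.AddEvents(minio.ObjectRemovedAll)
+bucketNotification.AddQueue(queueConfig)
+
+```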
+
+<a name="RemoveAllBucketNotification"></a>
+### RemoveAllBucketNotification(bucketName string) error
+
+Remove all configured bucket notifications on a bucket.
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ |name of the bucket. |
+
+__Return Values__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`err` | _error_ |standard error |
+
+__Example__
+
+
+```go
+err := minioClient.RemoveAllBucketNotification("mybucket")
+if err != nil {
+	fmt.Println("Unable to remove bucket notifications.")
+}
+```
+
+<a name="ListenBucketNotification"></a>
+### ListenBucketNotification(bucketName, prefix, suffix string, events []string, doneCh <-chan struct{}) <-chan NotificationInfo
+
+ListenBucketNotification API receives bucket notification events through the
+notification channel. Each `NotificationInfo` received on the channel has two
+fields, 'Records' and 'Err'.
+
+- 'Records' holds the notifications received from the server.
+- 'Err' indicates any error while processing the received notifications.
+
+NOTE: Notification channel is closed at the first occurrence of an error.
+
+__Parameters__
+
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`bucketName` | _string_ | Bucket to listen notifications from. |
+|`prefix` | _string_ | Object key prefix to filter notifications for. |
+|`suffix` | _string_ | Object key suffix to filter notifications for. |
+|`events` | _[]string_| Enables notifications for specific event types. |
+|`doneCh` | _chan struct{}_ | A message on this channel ends the ListenBucketNotification loop. |
+
+__Return Values__
+
+|Param |Type |Description |
+|:---|:---| :---|
+|`chan NotificationInfo` | _chan_ | Read channel for all notifications on bucket. |
+|`NotificationInfo` | _object_ | Notification object represents events info. |
+|`notificationInfo.Records` | _[]NotificationEvent_ | Collection of notification events. |
+|`notificationInfo.Err` | _error_ | Carries any error occurred during the operation. |
+
+
+__Example__
+
+
+```go
+
+// Create a done channel to control 'ListenBucketNotification' go routine.
+doneCh := make(chan struct{})
+
+// Indicate a background go-routine to exit cleanly upon return.
+defer close(doneCh)
+
+// Listen for bucket notifications on "YOUR-BUCKET" filtered by prefix, suffix and events.
+for notificationInfo := range minioClient.ListenBucketNotification("YOUR-BUCKET", "PREFIX", "SUFFIX", []string{
+ "s3:ObjectCreated:*",
+ "s3:ObjectRemoved:*",
+}, doneCh) {
+ if notificationInfo.Err != nil {
+ log.Fatalln(notificationInfo.Err)
+ }
+ log.Println(notificationInfo)
+}
+```
+
+## 6. Explore Further
+- [Build your own Go Music Player App example](https://docs.minio.io/docs/go-music-player-app)
diff --git a/examples/minio/listenbucketnotification.go b/examples/minio/listenbucketnotification.go
new file mode 100644
index 0000000..b682dcb
--- /dev/null
+++ b/examples/minio/listenbucketnotification.go
@@ -0,0 +1,59 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
+ // dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ minioClient, err := minio.New("play.minio.io:9000", "YOUR-ACCESS", "YOUR-SECRET", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // s3Client.TraceOn(os.Stderr)
+
+ // Create a done channel to control 'ListenBucketNotification' go routine.
+ doneCh := make(chan struct{})
+
+ // Indicate to our routine to exit cleanly upon return.
+ defer close(doneCh)
+
+	// Listen for bucket notifications on "YOUR-BUCKET" filtered by prefix, suffix and events.
+ for notificationInfo := range minioClient.ListenBucketNotification("YOUR-BUCKET", "PREFIX", "SUFFIX", []string{
+ "s3:ObjectCreated:*",
+ "s3:ObjectRemoved:*",
+ }, doneCh) {
+ if notificationInfo.Err != nil {
+ log.Fatalln(notificationInfo.Err)
+ }
+ log.Println(notificationInfo)
+ }
+}
diff --git a/examples/s3/bucketexists.go b/examples/s3/bucketexists.go
index ad388d9..945510d 100644
--- a/examples/s3/bucketexists.go
+++ b/examples/s3/bucketexists.go
@@ -38,10 +38,14 @@ func main() {
log.Fatalln(err)
}
- err = s3Client.BucketExists("my-bucketname")
+ found, err := s3Client.BucketExists("my-bucketname")
if err != nil {
log.Fatalln(err)
}
- log.Println("Success")
+ if found {
+ log.Println("Bucket found.")
+ } else {
+ log.Println("Bucket not found.")
+ }
}
diff --git a/examples/s3/getbucketnotification.go b/examples/s3/getbucketnotification.go
new file mode 100644
index 0000000..67f010e
--- /dev/null
+++ b/examples/s3/getbucketnotification.go
@@ -0,0 +1,55 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
+ // dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // s3Client.TraceOn(os.Stderr)
+
+ notifications, err := s3Client.GetBucketNotification("my-bucketname")
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+	log.Println("Bucket notifications successfully retrieved.")
+
+ for _, topicConfig := range notifications.TopicConfigs {
+ for _, e := range topicConfig.Events {
+ log.Println(e + " event is enabled.")
+ }
+ }
+}
diff --git a/examples/s3/listbucketpolicies.go b/examples/s3/listbucketpolicies.go
new file mode 100644
index 0000000..19a2d1b
--- /dev/null
+++ b/examples/s3/listbucketpolicies.go
@@ -0,0 +1,56 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
+ // dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // s3Client.TraceOn(os.Stderr)
+
+ // Fetch the policy at 'my-objectprefix'.
+ policies, err := s3Client.ListBucketPolicies("my-bucketname", "my-objectprefix")
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // ListBucketPolicies returns a map of objects policy rules and their associated permissions
+ // e.g. mybucket/downloadfolder/* => readonly
+ // mybucket/shared/* => readwrite
+
+ for resource, permission := range policies {
+ log.Println(resource, " => ", permission)
+ }
+}
diff --git a/examples/s3/putobject-progress.go b/examples/s3/putobject-progress.go
index e2da5bd..f668adf 100644
--- a/examples/s3/putobject-progress.go
+++ b/examples/s3/putobject-progress.go
@@ -21,8 +21,8 @@ package main
import (
"log"
+ "github.com/cheggaaa/pb"
"github.com/minio/minio-go"
- "github.com/minio/pb"
)
func main() {
diff --git a/examples/s3/removeallbucketnotification.go b/examples/s3/removeallbucketnotification.go
new file mode 100644
index 0000000..0f5f3a7
--- /dev/null
+++ b/examples/s3/removeallbucketnotification.go
@@ -0,0 +1,49 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
+ // dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // s3Client.TraceOn(os.Stderr)
+
+ err = s3Client.RemoveAllBucketNotification("my-bucketname")
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+	log.Println("Bucket notifications successfully removed.")
+}
diff --git a/examples/s3/removeobjects.go b/examples/s3/removeobjects.go
new file mode 100644
index 0000000..5946069
--- /dev/null
+++ b/examples/s3/removeobjects.go
@@ -0,0 +1,61 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+ "strconv"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
+ // are dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ objectsCh := make(chan string)
+
+	// Send object names that need to be removed to objectsCh
+ go func() {
+ defer close(objectsCh)
+ for i := 0; i < 10; i++ {
+ objectsCh <- "/path/to/my-objectname" + strconv.Itoa(i)
+ }
+ }()
+
+ // Call RemoveObjects API
+ errorCh := s3Client.RemoveObjects("my-bucketname", objectsCh)
+
+ // Print errors received from RemoveObjects API
+ for e := range errorCh {
+ log.Fatalln("Failed to remove " + e.ObjectName + ", error: " + e.Err.Error())
+ }
+
+ log.Println("Success")
+}
diff --git a/examples/s3/setbucketnotification.go b/examples/s3/setbucketnotification.go
new file mode 100644
index 0000000..5fe1e31
--- /dev/null
+++ b/examples/s3/setbucketnotification.go
@@ -0,0 +1,85 @@
+// +build ignore
+
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package main
+
+import (
+ "log"
+
+ "github.com/minio/minio-go"
+)
+
+func main() {
+ // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
+ // dummy values, please replace them with original values.
+
+ // Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
+ // This boolean value is the last argument for New().
+
+ // New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
+ // determined based on the Endpoint value.
+ s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
+ if err != nil {
+ log.Fatalln(err)
+ }
+
+ // s3Client.TraceOn(os.Stderr)
+
+ // ARN represents a notification channel that needs to be created in your S3 provider
+ // (e.g. http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html)
+
+ // An example of an ARN:
+	//   arn:aws:sns:us-east-1:804064459714:UploadPhoto
+	//        ^   ^       ^          ^           ^
+	//        |   |       |          |           |
+	//  Provider  Service Region    Account ID   Notification Name
+ //
+ // You should replace YOUR-PROVIDER, YOUR-SERVICE, YOUR-REGION, YOUR-ACCOUNT-ID and YOUR-RESOURCE
+ // with actual values that you receive from the S3 provider
+
+ // Here you create a new Topic notification
+ topicArn := minio.NewArn("YOUR-PROVIDER", "YOUR-SERVICE", "YOUR-REGION", "YOUR-ACCOUNT-ID", "YOUR-RESOURCE")
+ topicConfig := minio.NewNotificationConfig(topicArn)
+ topicConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll)
+ topicConfig.AddFilterPrefix("photos/")
+ topicConfig.AddFilterSuffix(".jpg")
+
+ // Create a new Queue notification
+ queueArn := minio.NewArn("YOUR-PROVIDER", "YOUR-SERVICE", "YOUR-REGION", "YOUR-ACCOUNT-ID", "YOUR-RESOURCE")
+ queueConfig := minio.NewNotificationConfig(queueArn)
+ queueConfig.AddEvents(minio.ObjectRemovedAll)
+
+ // Create a new Lambda (CloudFunction)
+ lambdaArn := minio.NewArn("YOUR-PROVIDER", "YOUR-SERVICE", "YOUR-REGION", "YOUR-ACCOUNT-ID", "YOUR-RESOURCE")
+ lambdaConfig := minio.NewNotificationConfig(lambdaArn)
+ lambdaConfig.AddEvents(minio.ObjectRemovedAll)
+ lambdaConfig.AddFilterSuffix(".swp")
+
+ // Now, set all previously created notification configs
+ bucketNotification := minio.BucketNotification{}
+ bucketNotification.AddTopic(topicConfig)
+ bucketNotification.AddQueue(queueConfig)
+ bucketNotification.AddLambda(lambdaConfig)
+
+ err = s3Client.SetBucketNotification("YOUR-BUCKET", bucketNotification)
+ if err != nil {
+ log.Fatalln("Error: " + err.Error())
+ }
+ log.Println("Success")
+}
diff --git a/examples/s3/setbucketpolicy.go b/examples/s3/setbucketpolicy.go
index 3ffa3b6..40906ee 100644
--- a/examples/s3/setbucketpolicy.go
+++ b/examples/s3/setbucketpolicy.go
@@ -22,6 +22,7 @@ import (
"log"
"github.com/minio/minio-go"
+ "github.com/minio/minio-go/pkg/policy"
)
func main() {
@@ -41,11 +42,11 @@ func main() {
// s3Client.TraceOn(os.Stderr)
// Description of policy input.
- // minio.BucketPolicyNone - Remove any previously applied bucket policy at a prefix.
- // minio.BucketPolicyReadOnly - Set read-only operations at a prefix.
- // minio.BucketPolicyWriteOnly - Set write-only operations at a prefix.
- // minio.BucketPolicyReadWrite - Set read-write operations at a prefix.
- err = s3Client.SetBucketPolicy("my-bucketname", "my-objectprefix", minio.BucketPolicyReadWrite)
+ // policy.BucketPolicyNone - Remove any previously applied bucket policy at a prefix.
+ // policy.BucketPolicyReadOnly - Set read-only operations at a prefix.
+ // policy.BucketPolicyWriteOnly - Set write-only operations at a prefix.
+ // policy.BucketPolicyReadWrite - Set read-write operations at a prefix.
+ err = s3Client.SetBucketPolicy("my-bucketname", "my-objectprefix", policy.BucketPolicyReadWrite)
if err != nil {
log.Fatalln(err)
}
diff --git a/pkg/policy/bucket-policy-condition.go b/pkg/policy/bucket-policy-condition.go
new file mode 100644
index 0000000..078bcd1
--- /dev/null
+++ b/pkg/policy/bucket-policy-condition.go
@@ -0,0 +1,115 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package policy
+
+import "github.com/minio/minio-go/pkg/set"
+
+// ConditionKeyMap - map of policy condition key and value.
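+//
+// A hypothetical usage sketch:
+//
+//	ckm := make(ConditionKeyMap)
+//	ckm.Add("s3:prefix", set.CreateStringSet("photos/"))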
+type ConditionKeyMap map[string]set.StringSet
+
+// Add - adds key and value. The value is appended if the key already exists.
+func (ckm ConditionKeyMap) Add(key string, value set.StringSet) {
+ if v, ok := ckm[key]; ok {
+ ckm[key] = v.Union(value)
+ } else {
+ ckm[key] = set.CopyStringSet(value)
+ }
+}
+
+// Remove - removes the value from the given key. If the key becomes empty after removal, the key is also removed.
+func (ckm ConditionKeyMap) Remove(key string, value set.StringSet) {
+ if v, ok := ckm[key]; ok {
+ if value != nil {
+ ckm[key] = v.Difference(value)
+ }
+
+ if ckm[key].IsEmpty() {
+ delete(ckm, key)
+ }
+ }
+}
+
+// RemoveKey - removes key and its value.
+func (ckm ConditionKeyMap) RemoveKey(key string) {
+ if _, ok := ckm[key]; ok {
+ delete(ckm, key)
+ }
+}
+
+// CopyConditionKeyMap - returns new copy of given ConditionKeyMap.
+func CopyConditionKeyMap(condKeyMap ConditionKeyMap) ConditionKeyMap {
+ out := make(ConditionKeyMap)
+
+ for k, v := range condKeyMap {
+ out[k] = set.CopyStringSet(v)
+ }
+
+ return out
+}
+
+// mergeConditionKeyMap - returns a new ConditionKeyMap which contains merged key/value of given two ConditionKeyMap.
+func mergeConditionKeyMap(condKeyMap1 ConditionKeyMap, condKeyMap2 ConditionKeyMap) ConditionKeyMap {
+ out := CopyConditionKeyMap(condKeyMap1)
+
+ for k, v := range condKeyMap2 {
+ if ev, ok := out[k]; ok {
+ out[k] = ev.Union(v)
+ } else {
+ out[k] = set.CopyStringSet(v)
+ }
+ }
+
+ return out
+}
+
+// ConditionMap - map of condition and conditional values.
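+//
+// A hypothetical usage sketch (ckm is a previously built ConditionKeyMap):
+//
+//	cond := make(ConditionMap)
+//	cond.Add("StringEquals", ckm)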
+type ConditionMap map[string]ConditionKeyMap
+
+// Add - adds condition key and condition value. The value is appended if key already exists.
+func (cond ConditionMap) Add(condKey string, condKeyMap ConditionKeyMap) {
+ if v, ok := cond[condKey]; ok {
+ cond[condKey] = mergeConditionKeyMap(v, condKeyMap)
+ } else {
+ cond[condKey] = CopyConditionKeyMap(condKeyMap)
+ }
+}
+
+// Remove - removes condition key and its value.
+func (cond ConditionMap) Remove(condKey string) {
+ if _, ok := cond[condKey]; ok {
+ delete(cond, condKey)
+ }
+}
+
+// mergeConditionMap - returns new ConditionMap which contains merged key/value of two ConditionMap.
+func mergeConditionMap(condMap1 ConditionMap, condMap2 ConditionMap) ConditionMap {
+ out := make(ConditionMap)
+
+ for k, v := range condMap1 {
+ out[k] = CopyConditionKeyMap(v)
+ }
+
+ for k, v := range condMap2 {
+ if ev, ok := out[k]; ok {
+ out[k] = mergeConditionKeyMap(ev, v)
+ } else {
+ out[k] = CopyConditionKeyMap(v)
+ }
+ }
+
+ return out
+}
diff --git a/pkg/policy/bucket-policy-condition_test.go b/pkg/policy/bucket-policy-condition_test.go
new file mode 100644
index 0000000..419868f
--- /dev/null
+++ b/pkg/policy/bucket-policy-condition_test.go
@@ -0,0 +1,289 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package policy
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/minio/minio-go/pkg/set"
+)
+
+// ConditionKeyMap.Add() is called and the result is validated.
+func TestConditionKeyMapAdd(t *testing.T) {
+ condKeyMap := make(ConditionKeyMap)
+ testCases := []struct {
+ key string
+ value set.StringSet
+ expectedResult string
+ }{
+ // Add new key and value.
+ {"s3:prefix", set.CreateStringSet("hello"), `{"s3:prefix":["hello"]}`},
+ // Add existing key and value.
+ {"s3:prefix", set.CreateStringSet("hello"), `{"s3:prefix":["hello"]}`},
+		// Add existing key with a new value.
+ {"s3:prefix", set.CreateStringSet("world"), `{"s3:prefix":["hello","world"]}`},
+ }
+
+ for _, testCase := range testCases {
+ condKeyMap.Add(testCase.key, testCase.value)
+ if data, err := json.Marshal(condKeyMap); err != nil {
+ t.Fatalf("Unable to marshal ConditionKeyMap to JSON, %s", err)
+ } else {
+ if string(data) != testCase.expectedResult {
+ t.Fatalf("case: %+v: expected: %s, got: %s", testCase, testCase.expectedResult, string(data))
+ }
+ }
+ }
+}
+
+// ConditionKeyMap.Remove() is called and the result is validated.
+func TestConditionKeyMapRemove(t *testing.T) {
+ condKeyMap := make(ConditionKeyMap)
+ condKeyMap.Add("s3:prefix", set.CreateStringSet("hello", "world"))
+
+ testCases := []struct {
+ key string
+ value set.StringSet
+ expectedResult string
+ }{
+ // Remove non-existent key and value.
+ {"s3:myprefix", set.CreateStringSet("hello"), `{"s3:prefix":["hello","world"]}`},
+ // Remove existing key and value.
+ {"s3:prefix", set.CreateStringSet("hello"), `{"s3:prefix":["world"]}`},
+		// Remove the remaining value so that the key itself is also removed.
+ {"s3:prefix", set.CreateStringSet("world"), `{}`},
+ }
+
+ for _, testCase := range testCases {
+ condKeyMap.Remove(testCase.key, testCase.value)
+ if data, err := json.Marshal(condKeyMap); err != nil {
+ t.Fatalf("Unable to marshal ConditionKeyMap to JSON, %s", err)
+ } else {
+ if string(data) != testCase.expectedResult {
+ t.Fatalf("case: %+v: expected: %s, got: %s", testCase, testCase.expectedResult, string(data))
+ }
+ }
+ }
+}
+
+// ConditionKeyMap.RemoveKey() is called and the result is validated.
+func TestConditionKeyMapRemoveKey(t *testing.T) {
+ condKeyMap := make(ConditionKeyMap)
+ condKeyMap.Add("s3:prefix", set.CreateStringSet("hello", "world"))
+
+ testCases := []struct {
+ key string
+ expectedResult string
+ }{
+ // Remove non-existent key.
+ {"s3:myprefix", `{"s3:prefix":["hello","world"]}`},
+ // Remove existing key.
+ {"s3:prefix", `{}`},
+ }
+
+ for _, testCase := range testCases {
+ condKeyMap.RemoveKey(testCase.key)
+ if data, err := json.Marshal(condKeyMap); err != nil {
+ t.Fatalf("Unable to marshal ConditionKeyMap to JSON, %s", err)
+ } else {
+ if string(data) != testCase.expectedResult {
+ t.Fatalf("case: %+v: expected: %s, got: %s", testCase, testCase.expectedResult, string(data))
+ }
+ }
+ }
+}
+
+// CopyConditionKeyMap() is called and the result is validated.
+func TestCopyConditionKeyMap(t *testing.T) {
+ emptyCondKeyMap := make(ConditionKeyMap)
+ nonEmptyCondKeyMap := make(ConditionKeyMap)
+ nonEmptyCondKeyMap.Add("s3:prefix", set.CreateStringSet("hello", "world"))
+
+ testCases := []struct {
+ condKeyMap ConditionKeyMap
+ expectedResult string
+ }{
+ // To test empty ConditionKeyMap.
+ {emptyCondKeyMap, `{}`},
+ // To test non-empty ConditionKeyMap.
+ {nonEmptyCondKeyMap, `{"s3:prefix":["hello","world"]}`},
+ }
+
+ for _, testCase := range testCases {
+ condKeyMap := CopyConditionKeyMap(testCase.condKeyMap)
+ if data, err := json.Marshal(condKeyMap); err != nil {
+ t.Fatalf("Unable to marshal ConditionKeyMap to JSON, %s", err)
+ } else {
+ if string(data) != testCase.expectedResult {
+ t.Fatalf("case: %+v: expected: %s, got: %s", testCase, testCase.expectedResult, string(data))
+ }
+ }
+ }
+}
+
+// mergeConditionKeyMap() is called and the result is validated.
+func TestMergeConditionKeyMap(t *testing.T) {
+ condKeyMap1 := make(ConditionKeyMap)
+ condKeyMap1.Add("s3:prefix", set.CreateStringSet("hello"))
+
+ condKeyMap2 := make(ConditionKeyMap)
+ condKeyMap2.Add("s3:prefix", set.CreateStringSet("world"))
+
+ condKeyMap3 := make(ConditionKeyMap)
+ condKeyMap3.Add("s3:myprefix", set.CreateStringSet("world"))
+
+ testCases := []struct {
+ condKeyMap1 ConditionKeyMap
+ condKeyMap2 ConditionKeyMap
+ expectedResult string
+ }{
+ // Both arguments are empty.
+ {make(ConditionKeyMap), make(ConditionKeyMap), `{}`},
+ // First argument is empty.
+ {make(ConditionKeyMap), condKeyMap1, `{"s3:prefix":["hello"]}`},
+ // Second argument is empty.
+ {condKeyMap1, make(ConditionKeyMap), `{"s3:prefix":["hello"]}`},
+ // Both arguments are same value.
+ {condKeyMap1, condKeyMap1, `{"s3:prefix":["hello"]}`},
+ // Value of second argument will be merged.
+ {condKeyMap1, condKeyMap2, `{"s3:prefix":["hello","world"]}`},
+ // second argument will be added.
+ {condKeyMap1, condKeyMap3, `{"s3:myprefix":["world"],"s3:prefix":["hello"]}`},
+ }
+
+ for _, testCase := range testCases {
+ condKeyMap := mergeConditionKeyMap(testCase.condKeyMap1, testCase.condKeyMap2)
+ if data, err := json.Marshal(condKeyMap); err != nil {
+ t.Fatalf("Unable to marshal ConditionKeyMap to JSON, %s", err)
+ } else {
+ if string(data) != testCase.expectedResult {
+ t.Fatalf("case: %+v: expected: %s, got: %s", testCase, testCase.expectedResult, string(data))
+ }
+ }
+ }
+}
+
+// ConditionMap.Add() is called and the result is validated.
+func TestConditionMapAdd(t *testing.T) {
+ condMap := make(ConditionMap)
+
+ condKeyMap1 := make(ConditionKeyMap)
+ condKeyMap1.Add("s3:prefix", set.CreateStringSet("hello"))
+
+ condKeyMap2 := make(ConditionKeyMap)
+ condKeyMap2.Add("s3:prefix", set.CreateStringSet("hello", "world"))
+
+ testCases := []struct {
+ key string
+ value ConditionKeyMap
+ expectedResult string
+ }{
+ // Add new key and value.
+ {"StringEquals", condKeyMap1, `{"StringEquals":{"s3:prefix":["hello"]}}`},
+ // Add existing key and value.
+ {"StringEquals", condKeyMap1, `{"StringEquals":{"s3:prefix":["hello"]}}`},
+		// Add existing key with a new value.
+ {"StringEquals", condKeyMap2, `{"StringEquals":{"s3:prefix":["hello","world"]}}`},
+ }
+
+ for _, testCase := range testCases {
+ condMap.Add(testCase.key, testCase.value)
+ if data, err := json.Marshal(condMap); err != nil {
+ t.Fatalf("Unable to marshal ConditionKeyMap to JSON, %s", err)
+ } else {
+ if string(data) != testCase.expectedResult {
+ t.Fatalf("case: %+v: expected: %s, got: %s", testCase, testCase.expectedResult, string(data))
+ }
+ }
+ }
+}
+
+// ConditionMap.Remove() is called and the result is validated.
+func TestConditionMapRemove(t *testing.T) {
+ condMap := make(ConditionMap)
+ condKeyMap := make(ConditionKeyMap)
+ condKeyMap.Add("s3:prefix", set.CreateStringSet("hello", "world"))
+ condMap.Add("StringEquals", condKeyMap)
+
+ testCases := []struct {
+ key string
+ expectedResult string
+ }{
+ // Remove non-existent key.
+ {"StringNotEquals", `{"StringEquals":{"s3:prefix":["hello","world"]}}`},
+ // Remove existing key.
+ {"StringEquals", `{}`},
+ }
+
+ for _, testCase := range testCases {
+ condMap.Remove(testCase.key)
+ if data, err := json.Marshal(condMap); err != nil {
+ t.Fatalf("Unable to marshal ConditionKeyMap to JSON, %s", err)
+ } else {
+ if string(data) != testCase.expectedResult {
+ t.Fatalf("case: %+v: expected: %s, got: %s", testCase, testCase.expectedResult, string(data))
+ }
+ }
+ }
+}
+
+// mergeConditionMap() is called and the result is validated.
+func TestMergeConditionMap(t *testing.T) {
+ condKeyMap1 := make(ConditionKeyMap)
+ condKeyMap1.Add("s3:prefix", set.CreateStringSet("hello"))
+ condMap1 := make(ConditionMap)
+ condMap1.Add("StringEquals", condKeyMap1)
+
+ condKeyMap2 := make(ConditionKeyMap)
+ condKeyMap2.Add("s3:prefix", set.CreateStringSet("world"))
+ condMap2 := make(ConditionMap)
+ condMap2.Add("StringEquals", condKeyMap2)
+
+ condMap3 := make(ConditionMap)
+ condMap3.Add("StringNotEquals", condKeyMap2)
+
+ testCases := []struct {
+ condMap1 ConditionMap
+ condMap2 ConditionMap
+ expectedResult string
+ }{
+ // Both arguments are empty.
+ {make(ConditionMap), make(ConditionMap), `{}`},
+ // First argument is empty.
+ {make(ConditionMap), condMap1, `{"StringEquals":{"s3:prefix":["hello"]}}`},
+ // Second argument is empty.
+ {condMap1, make(ConditionMap), `{"StringEquals":{"s3:prefix":["hello"]}}`},
+ // Both arguments are same value.
+ {condMap1, condMap1, `{"StringEquals":{"s3:prefix":["hello"]}}`},
+ // Value of second argument will be merged.
+ {condMap1, condMap2, `{"StringEquals":{"s3:prefix":["hello","world"]}}`},
+ // second argument will be added.
+ {condMap1, condMap3, `{"StringEquals":{"s3:prefix":["hello"]},"StringNotEquals":{"s3:prefix":["world"]}}`},
+ }
+
+ for _, testCase := range testCases {
+ condMap := mergeConditionMap(testCase.condMap1, testCase.condMap2)
+ if data, err := json.Marshal(condMap); err != nil {
+ t.Fatalf("Unable to marshal ConditionKeyMap to JSON, %s", err)
+ } else {
+ if string(data) != testCase.expectedResult {
+ t.Fatalf("case: %+v: expected: %s, got: %s", testCase, testCase.expectedResult, string(data))
+ }
+ }
+ }
+}
diff --git a/pkg/policy/bucket-policy.go b/pkg/policy/bucket-policy.go
new file mode 100644
index 0000000..f618059
--- /dev/null
+++ b/pkg/policy/bucket-policy.go
@@ -0,0 +1,635 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package policy
+
+import (
+ "reflect"
+ "strings"
+
+ "github.com/minio/minio-go/pkg/set"
+)
+
+// BucketPolicy - Bucket level policy.
+type BucketPolicy string
+
+// Different types of Policies currently supported for buckets.
+const (
+	BucketPolicyNone      BucketPolicy = "none"
+	BucketPolicyReadOnly  BucketPolicy = "readonly"
+	BucketPolicyReadWrite BucketPolicy = "readwrite"
+	BucketPolicyWriteOnly BucketPolicy = "writeonly"
+)
+
+// IsValidBucketPolicy - returns whether given policy value is supported.
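+//
+// For example, BucketPolicyReadOnly.IsValidBucketPolicy() is true, while
+// BucketPolicy("public").IsValidBucketPolicy() is false.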
+func (p BucketPolicy) IsValidBucketPolicy() bool {
+ switch p {
+ case BucketPolicyNone, BucketPolicyReadOnly, BucketPolicyReadWrite, BucketPolicyWriteOnly:
+ return true
+ }
+ return false
+}
+
+// Resource prefix for all aws resources.
+const awsResourcePrefix = "arn:aws:s3:::"
+
+// Common bucket actions for both read and write policies.
+var commonBucketActions = set.CreateStringSet("s3:GetBucketLocation")
+
+// Read only bucket actions.
+var readOnlyBucketActions = set.CreateStringSet("s3:ListBucket")
+
+// Write only bucket actions.
+var writeOnlyBucketActions = set.CreateStringSet("s3:ListBucketMultipartUploads")
+
+// Read only object actions.
+var readOnlyObjectActions = set.CreateStringSet("s3:GetObject")
+
+// Write only object actions.
+var writeOnlyObjectActions = set.CreateStringSet("s3:AbortMultipartUpload", "s3:DeleteObject", "s3:ListMultipartUploadParts", "s3:PutObject")
+
+// Read and write object actions.
+var readWriteObjectActions = readOnlyObjectActions.Union(writeOnlyObjectActions)
+
+// All valid bucket and object actions.
+var validActions = commonBucketActions.
+ Union(readOnlyBucketActions).
+ Union(writeOnlyBucketActions).
+ Union(readOnlyObjectActions).
+ Union(writeOnlyObjectActions)
+
+var startsWithFunc = func(resource string, resourcePrefix string) bool {
+ return strings.HasPrefix(resource, resourcePrefix)
+}
+
+// User - principal users of a policy statement (AWS ARNs and canonical user IDs).
+type User struct {
+ AWS set.StringSet `json:"AWS,omitempty"`
+ CanonicalUser set.StringSet `json:"CanonicalUser,omitempty"`
+}
+
+// Statement - minio policy statement
+type Statement struct {
+ Actions set.StringSet `json:"Action"`
+ Conditions ConditionMap `json:"Condition,omitempty"`
+ Effect string
+ Principal User `json:"Principal"`
+ Resources set.StringSet `json:"Resource"`
+ Sid string
+}
+
+// BucketAccessPolicy - minio policy collection
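+//
+// A BucketAccessPolicy marshals to the standard S3 policy JSON document,
+// for example (a hypothetical read-only policy, abbreviated):
+//
+//	{"Version":"2012-10-17","Statement":[{"Action":["s3:GetObject"],...}]}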
+type BucketAccessPolicy struct {
+ Version string // date in YYYY-MM-DD format
+ Statements []Statement `json:"Statement"`
+}
+
+// isValidStatement - returns whether given statement is valid to process for given bucket name.
+func isValidStatement(statement Statement, bucketName string) bool {
+ if statement.Actions.Intersection(validActions).IsEmpty() {
+ return false
+ }
+
+ if statement.Effect != "Allow" {
+ return false
+ }
+
+ if statement.Principal.AWS == nil || !statement.Principal.AWS.Contains("*") {
+ return false
+ }
+
+ bucketResource := awsResourcePrefix + bucketName
+ if statement.Resources.Contains(bucketResource) {
+ return true
+ }
+
+ if statement.Resources.FuncMatch(startsWithFunc, bucketResource+"/").IsEmpty() {
+ return false
+ }
+
+ return true
+}
+
+// Returns new statements with bucket actions for given policy.
+func newBucketStatement(policy BucketPolicy, bucketName string, prefix string) (statements []Statement) {
+ statements = []Statement{}
+ if policy == BucketPolicyNone || bucketName == "" {
+ return statements
+ }
+
+ bucketResource := set.CreateStringSet(awsResourcePrefix + bucketName)
+
+ statement := Statement{
+ Actions: commonBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: bucketResource,
+ Sid: "",
+ }
+ statements = append(statements, statement)
+
+ if policy == BucketPolicyReadOnly || policy == BucketPolicyReadWrite {
+ statement = Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: bucketResource,
+ Sid: "",
+ }
+ if prefix != "" {
+ condKeyMap := make(ConditionKeyMap)
+ condKeyMap.Add("s3:prefix", set.CreateStringSet(prefix))
+ condMap := make(ConditionMap)
+ condMap.Add("StringEquals", condKeyMap)
+ statement.Conditions = condMap
+ }
+ statements = append(statements, statement)
+ }
+
+ if policy == BucketPolicyWriteOnly || policy == BucketPolicyReadWrite {
+ statement = Statement{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: bucketResource,
+ Sid: "",
+ }
+ statements = append(statements, statement)
+ }
+
+ return statements
+}
+
+// Returns new statements containing object actions for given policy.
+func newObjectStatement(policy BucketPolicy, bucketName string, prefix string) (statements []Statement) {
+ statements = []Statement{}
+ if policy == BucketPolicyNone || bucketName == "" {
+ return statements
+ }
+
+ statement := Statement{
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet(awsResourcePrefix + bucketName + "/" + prefix + "*"),
+ Sid: "",
+ }
+
+ if policy == BucketPolicyReadOnly {
+ statement.Actions = readOnlyObjectActions
+ } else if policy == BucketPolicyWriteOnly {
+ statement.Actions = writeOnlyObjectActions
+ } else if policy == BucketPolicyReadWrite {
+ statement.Actions = readWriteObjectActions
+ }
+
+ statements = append(statements, statement)
+ return statements
+}
+
+// Returns new statements for given policy, bucket and prefix.
+func newStatements(policy BucketPolicy, bucketName string, prefix string) (statements []Statement) {
+ statements = []Statement{}
+ ns := newBucketStatement(policy, bucketName, prefix)
+ statements = append(statements, ns...)
+
+ ns = newObjectStatement(policy, bucketName, prefix)
+ statements = append(statements, ns...)
+
+ return statements
+}
+
+// Returns whether read-only and write-only object actions are in use by
+// statements other than those for the given prefix.
+func getInUsePolicy(statements []Statement, bucketName string, prefix string) (readOnlyInUse, writeOnlyInUse bool) {
+ resourcePrefix := awsResourcePrefix + bucketName + "/"
+ objectResource := awsResourcePrefix + bucketName + "/" + prefix + "*"
+
+ for _, s := range statements {
+ if !s.Resources.Contains(objectResource) && !s.Resources.FuncMatch(startsWithFunc, resourcePrefix).IsEmpty() {
+ if s.Actions.Intersection(readOnlyObjectActions).Equals(readOnlyObjectActions) {
+ readOnlyInUse = true
+ }
+
+ if s.Actions.Intersection(writeOnlyObjectActions).Equals(writeOnlyObjectActions) {
+ writeOnlyInUse = true
+ }
+ }
+ if readOnlyInUse && writeOnlyInUse {
+ break
+ }
+ }
+
+ return readOnlyInUse, writeOnlyInUse
+}
+
+// Removes object actions in given statement.
+func removeObjectActions(statement Statement, objectResource string) Statement {
+ if statement.Conditions == nil {
+ if len(statement.Resources) > 1 {
+ statement.Resources.Remove(objectResource)
+ } else {
+ statement.Actions = statement.Actions.Difference(readOnlyObjectActions)
+ statement.Actions = statement.Actions.Difference(writeOnlyObjectActions)
+ }
+ }
+
+ return statement
+}
+
+// Removes bucket actions for given prefix and bucket resource in given statement.
+func removeBucketActions(statement Statement, prefix string, bucketResource string, readOnlyInUse, writeOnlyInUse bool) Statement {
+ removeReadOnly := func() {
+ if !statement.Actions.Intersection(readOnlyBucketActions).Equals(readOnlyBucketActions) {
+ return
+ }
+
+ if statement.Conditions == nil {
+ statement.Actions = statement.Actions.Difference(readOnlyBucketActions)
+ return
+ }
+
+ if prefix != "" {
+ stringEqualsValue := statement.Conditions["StringEquals"]
+ values := set.NewStringSet()
+ if stringEqualsValue != nil {
+ values = stringEqualsValue["s3:prefix"]
+ if values == nil {
+ values = set.NewStringSet()
+ }
+ }
+
+ values.Remove(prefix)
+
+ if stringEqualsValue != nil {
+ if values.IsEmpty() {
+ delete(stringEqualsValue, "s3:prefix")
+ }
+ if len(stringEqualsValue) == 0 {
+ delete(statement.Conditions, "StringEquals")
+ }
+ }
+
+ if len(statement.Conditions) == 0 {
+ statement.Conditions = nil
+ statement.Actions = statement.Actions.Difference(readOnlyBucketActions)
+ }
+ }
+ }
+
+ removeWriteOnly := func() {
+ if statement.Conditions == nil {
+ statement.Actions = statement.Actions.Difference(writeOnlyBucketActions)
+ }
+ }
+
+ if len(statement.Resources) > 1 {
+ statement.Resources.Remove(bucketResource)
+ } else {
+ if !readOnlyInUse {
+ removeReadOnly()
+ }
+
+ if !writeOnlyInUse {
+ removeWriteOnly()
+ }
+ }
+
+ return statement
+}
+
+// Returns statements with actions for the given bucket name and prefix
+// removed.
+func removeStatements(statements []Statement, bucketName string, prefix string) []Statement {
+ bucketResource := awsResourcePrefix + bucketName
+ objectResource := awsResourcePrefix + bucketName + "/" + prefix + "*"
+ readOnlyInUse, writeOnlyInUse := getInUsePolicy(statements, bucketName, prefix)
+
+ out := []Statement{}
+ readOnlyBucketStatements := []Statement{}
+ s3PrefixValues := set.NewStringSet()
+
+ for _, statement := range statements {
+ if !isValidStatement(statement, bucketName) {
+ out = append(out, statement)
+ continue
+ }
+
+ if statement.Resources.Contains(bucketResource) {
+ if statement.Conditions != nil {
+ statement = removeBucketActions(statement, prefix, bucketResource, false, false)
+ } else {
+ statement = removeBucketActions(statement, prefix, bucketResource, readOnlyInUse, writeOnlyInUse)
+ }
+ } else if statement.Resources.Contains(objectResource) {
+ statement = removeObjectActions(statement, objectResource)
+ }
+
+ if !statement.Actions.IsEmpty() {
+ if statement.Resources.Contains(bucketResource) &&
+ statement.Actions.Intersection(readOnlyBucketActions).Equals(readOnlyBucketActions) &&
+ statement.Effect == "Allow" &&
+ statement.Principal.AWS.Contains("*") {
+
+ if statement.Conditions != nil {
+ stringEqualsValue := statement.Conditions["StringEquals"]
+ values := set.NewStringSet()
+ if stringEqualsValue != nil {
+ values = stringEqualsValue["s3:prefix"]
+ if values == nil {
+ values = set.NewStringSet()
+ }
+ }
+ s3PrefixValues = s3PrefixValues.Union(values.ApplyFunc(func(v string) string {
+ return bucketResource + "/" + v + "*"
+ }))
+ } else if !s3PrefixValues.IsEmpty() {
+ readOnlyBucketStatements = append(readOnlyBucketStatements, statement)
+ continue
+ }
+ }
+ out = append(out, statement)
+ }
+ }
+
+ skipBucketStatement := true
+ resourcePrefix := awsResourcePrefix + bucketName + "/"
+ for _, statement := range out {
+ if !statement.Resources.FuncMatch(startsWithFunc, resourcePrefix).IsEmpty() &&
+ s3PrefixValues.Intersection(statement.Resources).IsEmpty() {
+ skipBucketStatement = false
+ break
+ }
+ }
+
+ for _, statement := range readOnlyBucketStatements {
+ if skipBucketStatement &&
+ statement.Resources.Contains(bucketResource) &&
+ statement.Effect == "Allow" &&
+ statement.Principal.AWS.Contains("*") &&
+ statement.Conditions == nil {
+ continue
+ }
+
+ out = append(out, statement)
+ }
+
+ if len(out) == 1 {
+ statement := out[0]
+ if statement.Resources.Contains(bucketResource) &&
+ statement.Actions.Intersection(commonBucketActions).Equals(commonBucketActions) &&
+ statement.Effect == "Allow" &&
+ statement.Principal.AWS.Contains("*") &&
+ statement.Conditions == nil {
+ out = []Statement{}
+ }
+ }
+
+ return out
+}
+
+// Appends given statement to the statement list, keeping statements unique.
+// - If the statement already exists in the list, it is ignored.
+// - If the statement exists with different conditions, the conditions are merged.
+// - Otherwise the statement is appended to the list.
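+//
+// For example (a hypothetical sketch), appending a statement that differs
+// only in Resource from an existing one unions the resource sets instead
+// of adding a duplicate entry:
+//
+//	s1 := Statement{Actions: readOnlyObjectActions, Effect: "Allow",
+//		Principal: User{AWS: set.CreateStringSet("*")},
+//		Resources: set.CreateStringSet("arn:aws:s3:::mybucket/a*")}
+//	s2 := s1
+//	s2.Resources = set.CreateStringSet("arn:aws:s3:::mybucket/b*")
+//	out := appendStatement([]Statement{s1}, s2) // len(out) == 1, with both resources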
+func appendStatement(statements []Statement, statement Statement) []Statement {
+ for i, s := range statements {
+ if s.Actions.Equals(statement.Actions) &&
+ s.Effect == statement.Effect &&
+ s.Principal.AWS.Equals(statement.Principal.AWS) &&
+ reflect.DeepEqual(s.Conditions, statement.Conditions) {
+ statements[i].Resources = s.Resources.Union(statement.Resources)
+ return statements
+ } else if s.Resources.Equals(statement.Resources) &&
+ s.Effect == statement.Effect &&
+ s.Principal.AWS.Equals(statement.Principal.AWS) &&
+ reflect.DeepEqual(s.Conditions, statement.Conditions) {
+ statements[i].Actions = s.Actions.Union(statement.Actions)
+ return statements
+ }
+
+ if s.Resources.Intersection(statement.Resources).Equals(statement.Resources) &&
+ s.Actions.Intersection(statement.Actions).Equals(statement.Actions) &&
+ s.Effect == statement.Effect &&
+ s.Principal.AWS.Intersection(statement.Principal.AWS).Equals(statement.Principal.AWS) {
+ if reflect.DeepEqual(s.Conditions, statement.Conditions) {
+ return statements
+ }
+ if s.Conditions != nil && statement.Conditions != nil {
+ if s.Resources.Equals(statement.Resources) {
+ statements[i].Conditions = mergeConditionMap(s.Conditions, statement.Conditions)
+ return statements
+ }
+ }
+ }
+ }
+
+ if !(statement.Actions.IsEmpty() && statement.Resources.IsEmpty()) {
+ return append(statements, statement)
+ }
+
+ return statements
+}
+
+// Appends two statement lists.
+func appendStatements(statements []Statement, appendStatements []Statement) []Statement {
+ for _, s := range appendStatements {
+ statements = appendStatement(statements, s)
+ }
+
+ return statements
+}
+
+// Returns policy of given bucket statement.
+func getBucketPolicy(statement Statement, prefix string) (commonFound, readOnly, writeOnly bool) {
+ if !(statement.Effect == "Allow" && statement.Principal.AWS.Contains("*")) {
+ return commonFound, readOnly, writeOnly
+ }
+
+ if statement.Actions.Intersection(commonBucketActions).Equals(commonBucketActions) &&
+ statement.Conditions == nil {
+ commonFound = true
+ }
+
+ if statement.Actions.Intersection(writeOnlyBucketActions).Equals(writeOnlyBucketActions) &&
+ statement.Conditions == nil {
+ writeOnly = true
+ }
+
+ if statement.Actions.Intersection(readOnlyBucketActions).Equals(readOnlyBucketActions) {
+ if prefix != "" && statement.Conditions != nil {
+ if stringEqualsValue, ok := statement.Conditions["StringEquals"]; ok {
+ if s3PrefixValues, ok := stringEqualsValue["s3:prefix"]; ok {
+ if s3PrefixValues.Contains(prefix) {
+ readOnly = true
+ }
+ }
+ } else if stringNotEqualsValue, ok := statement.Conditions["StringNotEquals"]; ok {
+ if s3PrefixValues, ok := stringNotEqualsValue["s3:prefix"]; ok {
+ if !s3PrefixValues.Contains(prefix) {
+ readOnly = true
+ }
+ }
+ }
+ } else if prefix == "" && statement.Conditions == nil {
+ readOnly = true
+ } else if prefix != "" && statement.Conditions == nil {
+ readOnly = true
+ }
+ }
+
+ return commonFound, readOnly, writeOnly
+}
+
+// Returns policy of given object statement.
+func getObjectPolicy(statement Statement) (readOnly bool, writeOnly bool) {
+ if statement.Effect == "Allow" &&
+ statement.Principal.AWS.Contains("*") &&
+ statement.Conditions == nil {
+ if statement.Actions.Intersection(readOnlyObjectActions).Equals(readOnlyObjectActions) {
+ readOnly = true
+ }
+ if statement.Actions.Intersection(writeOnlyObjectActions).Equals(writeOnlyObjectActions) {
+ writeOnly = true
+ }
+ }
+
+ return readOnly, writeOnly
+}
+
+// GetPolicy - Returns the policy for the given bucket name and prefix
+// found in the given statements.
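+//
+// A minimal sketch (hypothetical bucket name):
+//
+//	statements := SetPolicy(nil, BucketPolicyReadWrite, "mybucket", "")
+//	GetPolicy(statements, "mybucket", "") // BucketPolicyReadWrite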
+func GetPolicy(statements []Statement, bucketName string, prefix string) BucketPolicy {
+ bucketResource := awsResourcePrefix + bucketName
+ objectResource := awsResourcePrefix + bucketName + "/" + prefix + "*"
+
+ bucketCommonFound := false
+ bucketReadOnly := false
+ bucketWriteOnly := false
+ matchedResource := ""
+ objReadOnly := false
+ objWriteOnly := false
+
+ for _, s := range statements {
+ matchedObjResources := set.NewStringSet()
+ if s.Resources.Contains(objectResource) {
+ matchedObjResources.Add(objectResource)
+ } else {
+ matchedObjResources = s.Resources.FuncMatch(resourceMatch, objectResource)
+ }
+
+ if !matchedObjResources.IsEmpty() {
+ readOnly, writeOnly := getObjectPolicy(s)
+ for resource := range matchedObjResources {
+ if len(matchedResource) < len(resource) {
+ objReadOnly = readOnly
+ objWriteOnly = writeOnly
+ matchedResource = resource
+ } else if len(matchedResource) == len(resource) {
+ objReadOnly = objReadOnly || readOnly
+ objWriteOnly = objWriteOnly || writeOnly
+ matchedResource = resource
+ }
+ }
+ } else if s.Resources.Contains(bucketResource) {
+ commonFound, readOnly, writeOnly := getBucketPolicy(s, prefix)
+ bucketCommonFound = bucketCommonFound || commonFound
+ bucketReadOnly = bucketReadOnly || readOnly
+ bucketWriteOnly = bucketWriteOnly || writeOnly
+ }
+ }
+
+ policy := BucketPolicyNone
+ if bucketCommonFound {
+ if bucketReadOnly && bucketWriteOnly && objReadOnly && objWriteOnly {
+ policy = BucketPolicyReadWrite
+ } else if bucketReadOnly && objReadOnly {
+ policy = BucketPolicyReadOnly
+ } else if bucketWriteOnly && objWriteOnly {
+ policy = BucketPolicyWriteOnly
+ }
+ }
+
+ return policy
+}
+
+// GetPolicies - returns a map of object prefix policies for the given
+// bucket name found in the given statements.
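+//
+// A minimal sketch (hypothetical values):
+//
+//	statements := SetPolicy(nil, BucketPolicyReadOnly, "mybucket", "downloads")
+//	GetPolicies(statements, "mybucket")
+//	// map[string]BucketPolicy{"mybucket/downloads*": BucketPolicyReadOnly}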
+func GetPolicies(statements []Statement, bucketName string) map[string]BucketPolicy {
+ policyRules := map[string]BucketPolicy{}
+ objResources := set.NewStringSet()
+	// Search all resources related to object policies.
+ for _, s := range statements {
+ for r := range s.Resources {
+ if strings.HasPrefix(r, awsResourcePrefix+bucketName+"/") {
+ objResources.Add(r)
+ }
+ }
+ }
+	// Treat each policy resource as an actual object and fetch its policy.
+ for r := range objResources {
+		// Strip any trailing '*' and remember it in asterisk.
+ asterisk := ""
+ if strings.HasSuffix(r, "*") {
+ r = r[:len(r)-1]
+ asterisk = "*"
+ }
+		objectPath := r[len(awsResourcePrefix+bucketName)+1:]
+ p := GetPolicy(statements, bucketName, objectPath)
+ policyRules[bucketName+"/"+objectPath+asterisk] = p
+ }
+ return policyRules
+}
+
+// SetPolicy - Returns new statements with the policy for the given bucket
+// name and prefix applied.
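+//
+// A minimal sketch (hypothetical bucket and prefix):
+//
+//	// Grant read-only access under prefix "downloads".
+//	statements := SetPolicy(nil, BucketPolicyReadOnly, "mybucket", "downloads")
+//	// Revoke it again; the resulting statement list is empty.
+//	statements = SetPolicy(statements, BucketPolicyNone, "mybucket", "downloads")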
+func SetPolicy(statements []Statement, policy BucketPolicy, bucketName string, prefix string) []Statement {
+	out := removeStatements(statements, bucketName, prefix)
+	ns := newStatements(policy, bucketName, prefix)
+
+	return appendStatements(out, ns)
+}
+
+// resourceMatch - returns whether the resource matches the wild-card pattern.
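+//
+// For example (hypothetical values):
+//
+//	resourceMatch("arn:aws:s3:::mybucket/down*", "arn:aws:s3:::mybucket/downloads/x") // true
+//	resourceMatch("arn:aws:s3:::mybucket/down*", "arn:aws:s3:::mybucket/uploads/x")   // false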
+func resourceMatch(pattern, resource string) bool {
+ if pattern == "" {
+ return resource == pattern
+ }
+ if pattern == "*" {
+ return true
+ }
+ parts := strings.Split(pattern, "*")
+ if len(parts) == 1 {
+ return resource == pattern
+ }
+ tGlob := strings.HasSuffix(pattern, "*")
+ end := len(parts) - 1
+ if !strings.HasPrefix(resource, parts[0]) {
+ return false
+ }
+ for i := 1; i < end; i++ {
+ if !strings.Contains(resource, parts[i]) {
+ return false
+ }
+ idx := strings.Index(resource, parts[i]) + len(parts[i])
+ resource = resource[idx:]
+ }
+ return tGlob || strings.HasSuffix(resource, parts[end])
+}
diff --git a/pkg/policy/bucket-policy_test.go b/pkg/policy/bucket-policy_test.go
new file mode 100644
index 0000000..b1862c6
--- /dev/null
+++ b/pkg/policy/bucket-policy_test.go
@@ -0,0 +1,1822 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package policy
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "testing"
+
+ "github.com/minio/minio-go/pkg/set"
+)
+
+// isValidStatement() is called and the result is validated.
+func TestIsValidStatement(t *testing.T) {
+ testCases := []struct {
+ statement Statement
+ bucketName string
+ expectedResult bool
+ }{
+ // Empty statement and bucket name.
+ {Statement{}, "", false},
+ // Empty statement.
+ {Statement{}, "mybucket", false},
+ // Empty bucket name.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "", false},
+ // Statement with unknown actions.
+ {Statement{
+ Actions: set.CreateStringSet("s3:ListBucketVersions"),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "mybucket", false},
+ // Statement with unknown effect.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Deny",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "mybucket", false},
+ // Statement with nil Principal.AWS.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "mybucket", false},
+ // Statement with unknown Principal.AWS.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("arn:aws:iam::AccountNumberWithoutHyphens:root")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "mybucket", false},
+ // Statement with different bucket name.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::testbucket"),
+ }, "mybucket", false},
+ // Statement with bucket name with suffixed string.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybuckettest/myobject"),
+ }, "mybucket", false},
+ // Statement with bucket name and object name.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/myobject"),
+ }, "mybucket", true},
+ // Statement with condition, bucket name and object name.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/myobject"),
+ }, "mybucket", true},
+ }
+
+ for _, testCase := range testCases {
+ if result := isValidStatement(testCase.statement, testCase.bucketName); result != testCase.expectedResult {
+ t.Fatalf("%+v: expected: %t, got: %t", testCase, testCase.expectedResult, result)
+ }
+ }
+}
+
+// newStatements() is called and the result is validated.
+func TestNewStatements(t *testing.T) {
+ testCases := []struct {
+ policy BucketPolicy
+ bucketName string
+ prefix string
+ expectedResult string
+ }{
+ // BucketPolicyNone: with empty bucket name and prefix.
+ {BucketPolicyNone, "", "", `[]`},
+ // BucketPolicyNone: with bucket name and empty prefix.
+ {BucketPolicyNone, "mybucket", "", `[]`},
+		// BucketPolicyNone: with empty bucket name and non-empty prefix.
+ {BucketPolicyNone, "", "hello", `[]`},
+		// BucketPolicyNone: with bucket name and prefix.
+ {BucketPolicyNone, "mybucket", "hello", `[]`},
+ // BucketPolicyReadOnly: with empty bucket name and prefix.
+ {BucketPolicyReadOnly, "", "", `[]`},
+ // BucketPolicyReadOnly: with bucket name and empty prefix.
+ {BucketPolicyReadOnly, "mybucket", "", `[{"Action":["s3:GetBucketLocation"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/*"],"Sid":""}]`},
+		// BucketPolicyReadOnly: with empty bucket name and non-empty prefix.
+ {BucketPolicyReadOnly, "", "hello", `[]`},
+		// BucketPolicyReadOnly: with bucket name and prefix.
+ {BucketPolicyReadOnly, "mybucket", "hello", `[{"Action":["s3:GetBucketLocation"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:ListBucket"],"Condition":{"StringEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/hello*"],"Sid":""}]`},
+ // BucketPolicyReadWrite: with empty bucket name and prefix.
+ {BucketPolicyReadWrite, "", "", `[]`},
+ // BucketPolicyReadWrite: with bucket name and empty prefix.
+ {BucketPolicyReadWrite, "mybucket", "", `[{"Action":["s3:GetBucketLocation"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:GetObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/*"],"Sid":""}]`},
+		// BucketPolicyReadWrite: with empty bucket name and non-empty prefix.
+ {BucketPolicyReadWrite, "", "hello", `[]`},
+		// BucketPolicyReadWrite: with bucket name and prefix.
+ {BucketPolicyReadWrite, "mybucket", "hello", `[{"Action":["s3:GetBucketLocation"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:ListBucket"],"Condition":{"StringEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:GetObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/hello*"],"Sid":""}]`},
+ // BucketPolicyWriteOnly: with empty bucket name and prefix.
+ {BucketPolicyWriteOnly, "", "", `[]`},
+ // BucketPolicyWriteOnly: with bucket name and empty prefix.
+ {BucketPolicyWriteOnly, "mybucket", "", `[{"Action":["s3:GetBucketLocation"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/*"],"Sid":""}]`},
+		// BucketPolicyWriteOnly: with empty bucket name and non-empty prefix.
+ {BucketPolicyWriteOnly, "", "hello", `[]`},
+		// BucketPolicyWriteOnly: with bucket name and prefix.
+ {BucketPolicyWriteOnly, "mybucket", "hello", `[{"Action":["s3:GetBucketLocation"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/hello*"],"Sid":""}]`},
+ }
+
+ for _, testCase := range testCases {
+ statements := newStatements(testCase.policy, testCase.bucketName, testCase.prefix)
+		data, err := json.Marshal(statements)
+		if err != nil {
+			t.Fatalf("Unable to marshal statements to JSON, %s", err)
+		}
+		if string(data) != testCase.expectedResult {
+			t.Fatalf("%+v: expected: %s, got: %s", testCase, testCase.expectedResult, string(data))
+		}
+ }
+}
+
+// getInUsePolicy() is called and the result is validated.
+func TestGetInUsePolicy(t *testing.T) {
+ testCases := []struct {
+ statements []Statement
+ bucketName string
+ prefix string
+ expectedResult1 bool
+ expectedResult2 bool
+ }{
+ // All empty statements, bucket name and prefix.
+ {[]Statement{}, "", "", false, false},
+ // Non-empty statements, empty bucket name and empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "", "", false, false},
+ // Non-empty statements, non-empty bucket name and empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "", false, false},
+ // Non-empty statements, empty bucket name and non-empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "", "hello", false, false},
+ // Empty statements, non-empty bucket name and empty prefix.
+ {[]Statement{}, "mybucket", "", false, false},
+		// Empty statements, non-empty bucket name and non-empty prefix.
+ {[]Statement{}, "mybucket", "hello", false, false},
+ // Empty statements, empty bucket name and non-empty prefix.
+ {[]Statement{}, "", "hello", false, false},
+ // Non-empty statements, non-empty bucket name, non-empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "hello", false, false},
+ // different bucket statements and empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::testbucket"),
+ }}, "mybucket", "", false, false},
+ // different bucket statements.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::testbucket"),
+ }}, "mybucket", "hello", false, false},
+ // different bucket multi-statements and empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::testbucket"),
+ }, {
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::testbucket/world"),
+ }}, "mybucket", "", false, false},
+ // different bucket multi-statements.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::testbucket"),
+ }, {
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::testbucket/world"),
+ }}, "mybucket", "hello", false, false},
+ // read-only in use.
+ {[]Statement{{
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "hello", true, false},
+ // write-only in use.
+ {[]Statement{{
+ Actions: writeOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "hello", false, true},
+ // read-write in use.
+ {[]Statement{{
+ Actions: readWriteObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "hello", true, true},
+ // read-write multi-statements.
+ {[]Statement{{
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }, {
+ Actions: writeOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/ground"),
+ }}, "mybucket", "hello", true, true},
+ }
+
+ for _, testCase := range testCases {
+ result1, result2 := getInUsePolicy(testCase.statements, testCase.bucketName, testCase.prefix)
+ if !(result1 == testCase.expectedResult1 && result2 == testCase.expectedResult2) {
+ t.Fatalf("%+v: expected: [%t,%t], got: [%t,%t]", testCase,
+ testCase.expectedResult1, testCase.expectedResult2,
+ result1, result2)
+ }
+ }
+}
+
+// removeStatements() is called and the result is validated.
+func TestRemoveStatements(t *testing.T) {
+ unknownCondMap1 := make(ConditionMap)
+ unknownCondKeyMap1 := make(ConditionKeyMap)
+ unknownCondKeyMap1.Add("s3:prefix", set.CreateStringSet("hello"))
+ unknownCondMap1.Add("StringNotEquals", unknownCondKeyMap1)
+
+ unknownCondMap11 := make(ConditionMap)
+ unknownCondKeyMap11 := make(ConditionKeyMap)
+ unknownCondKeyMap11.Add("s3:prefix", set.CreateStringSet("hello"))
+ unknownCondMap11.Add("StringNotEquals", unknownCondKeyMap11)
+
+ unknownCondMap12 := make(ConditionMap)
+ unknownCondKeyMap12 := make(ConditionKeyMap)
+ unknownCondKeyMap12.Add("s3:prefix", set.CreateStringSet("hello"))
+ unknownCondMap12.Add("StringNotEquals", unknownCondKeyMap12)
+
+ knownCondMap1 := make(ConditionMap)
+ knownCondKeyMap1 := make(ConditionKeyMap)
+ knownCondKeyMap1.Add("s3:prefix", set.CreateStringSet("hello"))
+ knownCondMap1.Add("StringEquals", knownCondKeyMap1)
+
+ knownCondMap11 := make(ConditionMap)
+ knownCondKeyMap11 := make(ConditionKeyMap)
+ knownCondKeyMap11.Add("s3:prefix", set.CreateStringSet("hello"))
+ knownCondMap11.Add("StringEquals", knownCondKeyMap11)
+
+ knownCondMap12 := make(ConditionMap)
+ knownCondKeyMap12 := make(ConditionKeyMap)
+ knownCondKeyMap12.Add("s3:prefix", set.CreateStringSet("hello"))
+ knownCondMap12.Add("StringEquals", knownCondKeyMap12)
+
+ knownCondMap13 := make(ConditionMap)
+ knownCondKeyMap13 := make(ConditionKeyMap)
+ knownCondKeyMap13.Add("s3:prefix", set.CreateStringSet("hello"))
+ knownCondMap13.Add("StringEquals", knownCondKeyMap13)
+
+ knownCondMap14 := make(ConditionMap)
+ knownCondKeyMap14 := make(ConditionKeyMap)
+ knownCondKeyMap14.Add("s3:prefix", set.CreateStringSet("hello"))
+ knownCondMap14.Add("StringEquals", knownCondKeyMap14)
+
+ knownCondMap2 := make(ConditionMap)
+ knownCondKeyMap2 := make(ConditionKeyMap)
+ knownCondKeyMap2.Add("s3:prefix", set.CreateStringSet("hello", "world"))
+ knownCondMap2.Add("StringEquals", knownCondKeyMap2)
+
+ testCases := []struct {
+ statements []Statement
+ bucketName string
+ prefix string
+ expectedResult string
+ }{
+ // All empty statements, bucket name and prefix.
+ {[]Statement{}, "", "", `[]`},
+ // Non-empty statements, empty bucket name and empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "", "", `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Non-empty statements, non-empty bucket name and empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Non-empty statements, empty bucket name and non-empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "", "hello", `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Empty statements, non-empty bucket name and empty prefix.
+ {[]Statement{}, "mybucket", "", `[]`},
+		// Empty statements, non-empty bucket name and non-empty prefix.
+ {[]Statement{}, "mybucket", "hello", `[]`},
+ // Empty statements, empty bucket name and non-empty prefix.
+ {[]Statement{}, "", "hello", `[]`},
+ // Statement with unknown Actions with empty prefix.
+ {[]Statement{{
+ Actions: set.CreateStringSet("s3:ListBucketVersions", "s3:ListAllMyBuckets"),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "", `[{"Action":["s3:ListAllMyBuckets","s3:ListBucketVersions"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Statement with unknown Actions.
+ {[]Statement{{
+ Actions: set.CreateStringSet("s3:ListBucketVersions", "s3:ListAllMyBuckets"),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListAllMyBuckets","s3:ListBucketVersions"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Statement with unknown Effect with empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Deny",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucket"],"Effect":"Deny","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Statement with unknown Effect.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Deny",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListBucket"],"Effect":"Deny","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Statement with unknown Principal.User.AWS with empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("arn:aws:iam::AccountNumberWithoutHyphens:root")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["arn:aws:iam::AccountNumberWithoutHyphens:root"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Statement with unknown Principal.User.AWS.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("arn:aws:iam::AccountNumberWithoutHyphens:root")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["arn:aws:iam::AccountNumberWithoutHyphens:root"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Statement with unknown Principal.User.CanonicalUser with empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{CanonicalUser: set.CreateStringSet("649262f44b8145cb")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"CanonicalUser":["649262f44b8145cb"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Statement with unknown Principal.User.CanonicalUser.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{CanonicalUser: set.CreateStringSet("649262f44b8145cb")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"CanonicalUser":["649262f44b8145cb"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Statement with unknown Conditions with empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: unknownCondMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucket"],"Condition":{"StringNotEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Statement with unknown Conditions.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: unknownCondMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListBucket"],"Condition":{"StringNotEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Statement with unknown Resource and empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::testbucket"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::testbucket"],"Sid":""}]`},
+ // Statement with unknown Resource.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::testbucket"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::testbucket"],"Sid":""}]`},
+ // Statement with known Actions with empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "", `[]`},
+ // Statement with known Actions.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "hello", `[]`},
+ // Statement with known multiple Actions with empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions).Union(commonBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "", `[]`},
+ // Statement with known multiple Actions.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions).Union(commonBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "hello", `[]`},
+ // RemoveBucketActions with readOnlyInUse.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with prefix, readOnlyInUse.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with writeOnlyInUse.
+ {[]Statement{{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: writeOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with prefix, writeOnlyInUse.
+ {[]Statement{{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: writeOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with readOnlyInUse and writeOnlyInUse.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readWriteObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:GetObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with prefix, readOnlyInUse and writeOnlyInUse.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readWriteObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:GetObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with known Conditions, readOnlyInUse.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: knownCondMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucket"],"Condition":{"StringEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with prefix, known Conditions, readOnlyInUse.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: knownCondMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "hello", `[{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with prefix, known Conditions contains other object prefix, readOnlyInUse.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: knownCondMap2,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListBucket"],"Condition":{"StringEquals":{"s3:prefix":["world"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with unknown Conditions, readOnlyInUse.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: unknownCondMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucket"],"Condition":{"StringNotEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with prefix, unknown Conditions, readOnlyInUse.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: unknownCondMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListBucket"],"Condition":{"StringNotEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with known Conditions, writeOnlyInUse.
+ {[]Statement{{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: knownCondMap11,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: writeOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucketMultipartUploads"],"Condition":{"StringEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with prefix, known Conditions, writeOnlyInUse.
+ {[]Statement{{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: knownCondMap11,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: writeOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListBucketMultipartUploads"],"Condition":{"StringEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with unknown Conditions, writeOnlyInUse.
+ {[]Statement{{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: unknownCondMap11,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: writeOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucketMultipartUploads"],"Condition":{"StringNotEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with prefix, unknown Conditions, writeOnlyInUse.
+ {[]Statement{{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: unknownCondMap11,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: writeOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListBucketMultipartUploads"],"Condition":{"StringNotEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with known Conditions, readOnlyInUse and writeOnlyInUse.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: knownCondMap12,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions.Union(writeOnlyObjectActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Condition":{"StringEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:GetObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with prefix, known Conditions, readOnlyInUse and writeOnlyInUse.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: knownCondMap12,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions.Union(writeOnlyObjectActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "hello", `[{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:GetObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with unknown Conditions, readOnlyInUse and writeOnlyInUse.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: unknownCondMap12,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions.Union(writeOnlyObjectActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Condition":{"StringNotEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:GetObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // RemoveBucketActions with prefix, unknown Conditions, readOnlyInUse and writeOnlyInUse.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: unknownCondMap12,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions.Union(writeOnlyObjectActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/world"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Condition":{"StringNotEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:GetObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/world"],"Sid":""}]`},
+ // readOnlyObjectActions - RemoveObjectActions with known condition.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: knownCondMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/hello*"],"Sid":""}]`},
+ // readOnlyObjectActions - RemoveObjectActions with prefix, known condition.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: knownCondMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }}, "mybucket", "hello", `[]`},
+ // readOnlyObjectActions - RemoveObjectActions with unknown condition.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: unknownCondMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucket"],"Condition":{"StringNotEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/hello*"],"Sid":""}]`},
+ // readOnlyObjectActions - RemoveObjectActions with prefix, unknown condition.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: unknownCondMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListBucket"],"Condition":{"StringNotEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // writeOnlyObjectActions - RemoveObjectActions with known condition.
+ {[]Statement{{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: knownCondMap13,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: writeOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucketMultipartUploads"],"Condition":{"StringEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/hello*"],"Sid":""}]`},
+ // writeOnlyObjectActions - RemoveObjectActions with prefix, known condition.
+ {[]Statement{{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: knownCondMap13,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: writeOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListBucketMultipartUploads"],"Condition":{"StringEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // writeOnlyObjectActions - RemoveObjectActions with unknown condition.
+ {[]Statement{{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: unknownCondMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: writeOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucketMultipartUploads"],"Condition":{"StringNotEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/hello*"],"Sid":""}]`},
+ // writeOnlyObjectActions - RemoveObjectActions with prefix, unknown condition.
+ {[]Statement{{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: unknownCondMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: writeOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListBucketMultipartUploads"],"Condition":{"StringNotEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // readWriteObjectActions - RemoveObjectActions with known condition.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: knownCondMap14,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions.Union(writeOnlyObjectActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Condition":{"StringEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:GetObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/hello*"],"Sid":""}]`},
+ // readWriteObjectActions - RemoveObjectActions with prefix, known condition.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: knownCondMap13,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions.Union(writeOnlyObjectActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }}, "mybucket", "hello", `[]`},
+ // readWriteObjectActions - RemoveObjectActions with unknown condition.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: unknownCondMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions.Union(writeOnlyObjectActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }}, "mybucket", "", `[{"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Condition":{"StringNotEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:GetObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/hello*"],"Sid":""}]`},
+ // readWriteObjectActions - RemoveObjectActions with prefix, unknown condition.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: unknownCondMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, {
+ Actions: readOnlyObjectActions.Union(writeOnlyObjectActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }}, "mybucket", "hello", `[{"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Condition":{"StringNotEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ }
+
+ for _, testCase := range testCases {
+ statements := removeStatements(testCase.statements, testCase.bucketName, testCase.prefix)
+ if data, err := json.Marshal(statements); err != nil {
+ t.Fatalf("unable encoding to json, %s", err)
+ } else if string(data) != testCase.expectedResult {
+ t.Fatalf("%+v: expected: %s, got: %s", testCase, testCase.expectedResult, string(data))
+ }
+ }
+}
+
+// appendStatement() is called and the result is validated.
+func TestAppendStatement(t *testing.T) {
+ condMap := make(ConditionMap)
+ condKeyMap := make(ConditionKeyMap)
+ condKeyMap.Add("s3:prefix", set.CreateStringSet("hello"))
+ condMap.Add("StringEquals", condKeyMap)
+
+ condMap1 := make(ConditionMap)
+ condKeyMap1 := make(ConditionKeyMap)
+ condKeyMap1.Add("s3:prefix", set.CreateStringSet("world"))
+ condMap1.Add("StringEquals", condKeyMap1)
+
+ unknownCondMap1 := make(ConditionMap)
+ unknownCondKeyMap1 := make(ConditionKeyMap)
+ unknownCondKeyMap1.Add("s3:prefix", set.CreateStringSet("world"))
+ unknownCondMap1.Add("StringNotEquals", unknownCondKeyMap1)
+
+ testCases := []struct {
+ statements []Statement
+ statement Statement
+ expectedResult string
+ }{
+ // Empty statements and empty new statement.
+ {[]Statement{}, Statement{}, `[]`},
+ // Non-empty statements and empty new statement.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, Statement{}, `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Empty statements and non-empty new statement.
+ {[]Statement{}, Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Append existing statement.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Append same statement with different resource.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::testbucket"),
+ }, `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket","arn:aws:s3:::testbucket"],"Sid":""}]`},
+ // Append same statement with different actions.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, Statement{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, `[{"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Elements of new statement contains elements in statements.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket", "arn:aws:s3:::testbucket"),
+ }}, Statement{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, `[{"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket","arn:aws:s3:::testbucket"],"Sid":""}]`},
+ // Elements of new statement with conditions contains elements in statements.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: condMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket", "arn:aws:s3:::testbucket"),
+ }}, Statement{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: condMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, `[{"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Condition":{"StringEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket","arn:aws:s3:::testbucket"],"Sid":""}]`},
+ // Statements with condition and new statement with condition.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: condMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket", "arn:aws:s3:::testbucket"),
+ }}, Statement{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: condMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, `[{"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Condition":{"StringEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket","arn:aws:s3:::testbucket"],"Sid":""},{"Action":["s3:ListBucketMultipartUploads"],"Condition":{"StringEquals":{"s3:prefix":["world"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Statements with condition and same resources, and new statement with condition.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: condMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, Statement{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: condMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, `[{"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Condition":{"StringEquals":{"s3:prefix":["hello","world"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Statements with unknown condition and same resources, and new statement with known condition.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: unknownCondMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, Statement{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: condMap1,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, `[{"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Condition":{"StringEquals":{"s3:prefix":["world"]},"StringNotEquals":{"s3:prefix":["world"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Statements without condition and new statement with condition.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket", "arn:aws:s3:::testbucket"),
+ }}, Statement{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: condMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, `[{"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket","arn:aws:s3:::testbucket"],"Sid":""},{"Action":["s3:ListBucketMultipartUploads"],"Condition":{"StringEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Statements with condition and new statement without condition.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: condMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket", "arn:aws:s3:::testbucket"),
+ }}, Statement{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, `[{"Action":["s3:ListBucket","s3:ListBucketMultipartUploads"],"Condition":{"StringEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket","arn:aws:s3:::testbucket"],"Sid":""},{"Action":["s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // Statements and new statement are different.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, Statement{
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }, `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/hello*"],"Sid":""}]`},
+ }
+
+ for _, testCase := range testCases {
+ statements := appendStatement(testCase.statements, testCase.statement)
+ if data, err := json.Marshal(statements); err != nil {
+ t.Fatalf("unable encoding to json, %s", err)
+ } else if string(data) != testCase.expectedResult {
+ t.Fatalf("%+v: expected: %s, got: %s", testCase, testCase.expectedResult, string(data))
+ }
+ }
+}
+
+// getBucketPolicy() is called and the result is validated.
+func TestGetBucketPolicy(t *testing.T) {
+ helloCondMap := make(ConditionMap)
+ helloCondKeyMap := make(ConditionKeyMap)
+ helloCondKeyMap.Add("s3:prefix", set.CreateStringSet("hello"))
+ helloCondMap.Add("StringEquals", helloCondKeyMap)
+
+ worldCondMap := make(ConditionMap)
+ worldCondKeyMap := make(ConditionKeyMap)
+ worldCondKeyMap.Add("s3:prefix", set.CreateStringSet("world"))
+ worldCondMap.Add("StringEquals", worldCondKeyMap)
+
+ notWorldCondMap := make(ConditionMap)
+ notWorldCondMap.Add("StringNotEquals", worldCondKeyMap)
+
+ testCases := []struct {
+ statement Statement
+ prefix string
+ expectedResult1 bool
+ expectedResult2 bool
+ expectedResult3 bool
+ }{
+ // Statement with invalid Effect.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Deny",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "", false, false, false},
+ // Statement with invalid Effect with prefix.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Deny",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "hello", false, false, false},
+ // Statement with invalid Principal.AWS.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("arn:aws:iam::AccountNumberWithoutHyphens:root")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "", false, false, false},
+ // Statement with invalid Principal.AWS with prefix.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("arn:aws:iam::AccountNumberWithoutHyphens:root")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "hello", false, false, false},
+
+ // Statement with commonBucketActions.
+ {Statement{
+ Actions: commonBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "", true, false, false},
+ // Statement with commonBucketActions.
+ {Statement{
+ Actions: commonBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "hello", true, false, false},
+
+ // Statement with commonBucketActions and condition.
+ {Statement{
+ Actions: commonBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "", false, false, false},
+ // Statement with commonBucketActions and condition.
+ {Statement{
+ Actions: commonBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "hello", false, false, false},
+ // Statement with writeOnlyBucketActions.
+ {Statement{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "", false, false, true},
+ // Statement with writeOnlyBucketActions.
+ {Statement{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "hello", false, false, true},
+ // Statement with writeOnlyBucketActions and condition
+ {Statement{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "", false, false, false},
+ // Statement with writeOnlyBucketActions and condition.
+ {Statement{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "hello", false, false, false},
+ // Statement with readOnlyBucketActions.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "", false, true, false},
+ // Statement with readOnlyBucketActions.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "hello", false, true, false},
+ // Statement with readOnlyBucketActions with empty condition.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "", false, false, false},
+ // Statement with readOnlyBucketActions with empty condition.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "hello", false, false, false},
+ // Statement with readOnlyBucketActions with matching condition.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: helloCondMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "", false, false, false},
+ // Statement with readOnlyBucketActions with matching condition.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: helloCondMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "hello", false, true, false},
+
+ // Statement with readOnlyBucketActions with different condition.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: worldCondMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "", false, false, false},
+ // Statement with readOnlyBucketActions with different condition.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: worldCondMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "hello", false, false, false},
+
+ // Statement with readOnlyBucketActions with StringNotEquals condition.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: notWorldCondMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "", false, false, false},
+ // Statement with readOnlyBucketActions with StringNotEquals condition.
+ {Statement{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: notWorldCondMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }, "hello", false, true, false},
+ }
+
+ for _, testCase := range testCases {
+ commonFound, readOnly, writeOnly := getBucketPolicy(testCase.statement, testCase.prefix)
+ if !(testCase.expectedResult1 == commonFound && testCase.expectedResult2 == readOnly && testCase.expectedResult3 == writeOnly) {
+ t.Fatalf("%+v: expected: [%t,%t,%t], got: [%t,%t,%t]", testCase,
+ testCase.expectedResult1, testCase.expectedResult2, testCase.expectedResult3,
+ commonFound, readOnly, writeOnly)
+ }
+ }
+}
+
+// getObjectPolicy() is called and the result is validated.
+func TestGetObjectPolicy(t *testing.T) {
+ testCases := []struct {
+ statement Statement
+ expectedResult1 bool
+ expectedResult2 bool
+ }{
+ // Statement with invalid Effect.
+ {Statement{
+ Actions: readOnlyObjectActions,
+ Effect: "Deny",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }, false, false},
+ // Statement with invalid Principal.AWS.
+ {Statement{
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("arn:aws:iam::AccountNumberWithoutHyphens:root")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }, false, false},
+ // Statement with condition.
+ {Statement{
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: make(ConditionMap),
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }, false, false},
+ // Statement with readOnlyObjectActions.
+ {Statement{
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }, true, false},
+ // Statement with writeOnlyObjectActions.
+ {Statement{
+ Actions: writeOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }, false, true},
+ // Statement with readOnlyObjectActions and writeOnlyObjectActions.
+ {Statement{
+ Actions: readOnlyObjectActions.Union(writeOnlyObjectActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/hello*"),
+ }, true, true},
+ }
+
+ for _, testCase := range testCases {
+ readOnly, writeOnly := getObjectPolicy(testCase.statement)
+ if !(testCase.expectedResult1 == readOnly && testCase.expectedResult2 == writeOnly) {
+ t.Fatalf("%+v: expected: [%t,%t], got: [%t,%t]", testCase,
+ testCase.expectedResult1, testCase.expectedResult2,
+ readOnly, writeOnly)
+ }
+ }
+}
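The two helpers exercised above each return per-statement capability flags rather than a policy value. A minimal sketch of how such flags might be folded into one of the BucketPolicy constants these tests expect; combinePolicy is an illustrative name, not part of the library:

```go
// combinePolicy is a hypothetical helper (not in minio-go) folding
// per-statement read/write flags into a single BucketPolicy value,
// consistent with the expectations encoded in the tests below.
func combinePolicy(readOnly, writeOnly bool) BucketPolicy {
	switch {
	case readOnly && writeOnly:
		return BucketPolicyReadWrite
	case readOnly:
		return BucketPolicyReadOnly
	case writeOnly:
		return BucketPolicyWriteOnly
	default:
		return BucketPolicyNone
	}
}
```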
+
+// GetPolicies() is called and the result is validated.
+func TestListBucketPolicies(t *testing.T) {
+
+ // Condition for read objects
+ downloadCondMap := make(ConditionMap)
+ downloadCondKeyMap := make(ConditionKeyMap)
+ downloadCondKeyMap.Add("s3:prefix", set.CreateStringSet("download"))
+ downloadCondMap.Add("StringEquals", downloadCondKeyMap)
+
+ // Condition for readwrite objects
+ downloadUploadCondMap := make(ConditionMap)
+ downloadUploadCondKeyMap := make(ConditionKeyMap)
+ downloadUploadCondKeyMap.Add("s3:prefix", set.CreateStringSet("both"))
+ downloadUploadCondMap.Add("StringEquals", downloadUploadCondKeyMap)
+
+ testCases := []struct {
+ statements []Statement
+ bucketName string
+ prefix string
+ expectedResult map[string]BucketPolicy
+ }{
+ // Empty statements, bucket name and prefix.
+ {[]Statement{}, "", "", map[string]BucketPolicy{}},
+ // Non-empty statements, empty bucket name and empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "", "", map[string]BucketPolicy{}},
+ // Empty statements, non-empty bucket name and empty prefix.
+ {[]Statement{}, "mybucket", "", map[string]BucketPolicy{}},
+ // Readonly object statement
+ {[]Statement{
+ {
+ Actions: commonBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ },
+ {
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: downloadCondMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ },
+ {
+ Actions: readOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/download*"),
+ }}, "mybucket", "", map[string]BucketPolicy{"mybucket/download*": BucketPolicyReadOnly}},
+ // Write Only
+ {[]Statement{
+ {
+ Actions: commonBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ },
+ {
+ Actions: writeOnlyObjectActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/upload*"),
+ }}, "mybucket", "", map[string]BucketPolicy{"mybucket/upload*": BucketPolicyWriteOnly}},
+ // Readwrite
+ {[]Statement{
+ {
+ Actions: commonBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ },
+ {
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: downloadUploadCondMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ },
+ {
+ Actions: writeOnlyObjectActions.Union(readOnlyObjectActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket/both*"),
+ }}, "mybucket", "", map[string]BucketPolicy{"mybucket/both*": BucketPolicyReadWrite}},
+ }
+
+ for _, testCase := range testCases {
+ policyRules := GetPolicies(testCase.statements, testCase.bucketName)
+ if !reflect.DeepEqual(testCase.expectedResult, policyRules) {
+ t.Fatalf("%+v:\n expected: %+v, got: %+v", testCase, testCase.expectedResult, policyRules)
+ }
+ }
+}
+
+// GetPolicy() is called and the result is validated.
+func TestGetPolicy(t *testing.T) {
+ helloCondMap := make(ConditionMap)
+ helloCondKeyMap := make(ConditionKeyMap)
+ helloCondKeyMap.Add("s3:prefix", set.CreateStringSet("hello"))
+ helloCondMap.Add("StringEquals", helloCondKeyMap)
+
+ testCases := []struct {
+ statements []Statement
+ bucketName string
+ prefix string
+ expectedResult BucketPolicy
+ }{
+ // Empty statements, bucket name and prefix.
+ {[]Statement{}, "", "", BucketPolicyNone},
+ // Non-empty statements, empty bucket name and empty prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "", "", BucketPolicyNone},
+ // Empty statements, non-empty bucket name and empty prefix.
+ {[]Statement{}, "mybucket", "", BucketPolicyNone},
+ // Non-matching statements.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::testbucket"),
+ }}, "mybucket", "", BucketPolicyNone},
+ // Non-matching statements with prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::testbucket"),
+ }}, "mybucket", "hello", BucketPolicyNone},
+ // Statements with only commonBucketActions.
+ {[]Statement{{
+ Actions: commonBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "", BucketPolicyNone},
+ // Statements with only commonBucketActions with prefix.
+ {[]Statement{{
+ Actions: commonBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "hello", BucketPolicyNone},
+ // Statements with only readOnlyBucketActions.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "", BucketPolicyNone},
+ // Statements with only readOnlyBucketActions with prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "hello", BucketPolicyNone},
+ // Statements with only readOnlyBucketActions with conditions.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: helloCondMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "", BucketPolicyNone},
+ // Statements with only readOnlyBucketActions with prefix, with conditions.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: helloCondMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "hello", BucketPolicyNone},
+ // Statements with only writeOnlyBucketActions.
+ {[]Statement{{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "", BucketPolicyNone},
+ // Statements with only writeOnlyBucketActions with prefix.
+ {[]Statement{{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "hello", BucketPolicyNone},
+ // Statements with only readOnlyBucketActions + writeOnlyBucketActions.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "", BucketPolicyNone},
+ // Statements with only readOnlyBucketActions + writeOnlyBucketActions with prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "hello", BucketPolicyNone},
+ // Statements with only readOnlyBucketActions + writeOnlyBucketActions and conditions.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: helloCondMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "", BucketPolicyNone},
+ // Statements with only readOnlyBucketActions + writeOnlyBucketActions and conditions with prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions.Union(writeOnlyBucketActions),
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: helloCondMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, "mybucket", "hello", BucketPolicyNone},
+ }
+
+ for _, testCase := range testCases {
+ policy := GetPolicy(testCase.statements, testCase.bucketName, testCase.prefix)
+ if testCase.expectedResult != policy {
+ t.Fatalf("%+v: expected: %s, got: %s", testCase, testCase.expectedResult, policy)
+ }
+ }
+}
+
+// SetPolicy() is called and the result is validated.
+func TestSetPolicy(t *testing.T) {
+ helloCondMap := make(ConditionMap)
+ helloCondKeyMap := make(ConditionKeyMap)
+ helloCondKeyMap.Add("s3:prefix", set.CreateStringSet("hello"))
+ helloCondMap.Add("StringEquals", helloCondKeyMap)
+
+ testCases := []struct {
+ statements []Statement
+ policy BucketPolicy
+ bucketName string
+ prefix string
+ expectedResult string
+ }{
+ // BucketPolicyNone - empty statements, bucket name and prefix.
+ {[]Statement{}, BucketPolicyNone, "", "", `[]`},
+ // BucketPolicyNone - non-empty statements, bucket name and prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, BucketPolicyNone, "", "", `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""}]`},
+ // BucketPolicyNone - empty statements, non-empty bucket name and prefix.
+ {[]Statement{}, BucketPolicyNone, "mybucket", "", `[]`},
+ // BucketPolicyNone - empty statements, bucket name and non-empty prefix.
+ {[]Statement{}, BucketPolicyNone, "", "hello", `[]`},
+ // BucketPolicyReadOnly - empty statements, bucket name and prefix.
+ {[]Statement{}, BucketPolicyReadOnly, "", "", `[]`},
+ // BucketPolicyReadOnly - non-empty statements, bucket name and prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::testbucket"),
+ }}, BucketPolicyReadOnly, "", "", `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::testbucket"],"Sid":""}]`},
+ // BucketPolicyReadOnly - empty statements, non-empty bucket name and prefix.
+ {[]Statement{}, BucketPolicyReadOnly, "mybucket", "", `[{"Action":["s3:GetBucketLocation","s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/*"],"Sid":""}]`},
+ // BucketPolicyReadOnly - empty statements, bucket name and non-empty prefix.
+ {[]Statement{}, BucketPolicyReadOnly, "", "hello", `[]`},
+ // BucketPolicyReadOnly - empty statements, non-empty bucket name and non-empty prefix.
+ {[]Statement{}, BucketPolicyReadOnly, "mybucket", "hello", `[{"Action":["s3:GetBucketLocation"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:ListBucket"],"Condition":{"StringEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/hello*"],"Sid":""}]`},
+ // BucketPolicyWriteOnly - empty statements, bucket name and prefix.
+ {[]Statement{}, BucketPolicyReadOnly, "", "", `[]`},
+ // BucketPolicyWriteOnly - non-empty statements, bucket name and prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::testbucket"),
+ }}, BucketPolicyWriteOnly, "", "", `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::testbucket"],"Sid":""}]`},
+ // BucketPolicyWriteOnly - empty statements, non-empty bucket name and prefix.
+ {[]Statement{}, BucketPolicyWriteOnly, "mybucket", "", `[{"Action":["s3:GetBucketLocation","s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/*"],"Sid":""}]`},
+ // BucketPolicyWriteOnly - empty statements, bucket name and non-empty prefix.
+ {[]Statement{}, BucketPolicyWriteOnly, "", "hello", `[]`},
+ // BucketPolicyWriteOnly - empty statements, non-empty bucket name and non-empty prefix.
+ {[]Statement{}, BucketPolicyWriteOnly, "mybucket", "hello", `[{"Action":["s3:GetBucketLocation","s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/hello*"],"Sid":""}]`},
+ // BucketPolicyReadWrite - empty statements, bucket name and prefix.
+ {[]Statement{}, BucketPolicyReadWrite, "", "", `[]`},
+ // BucketPolicyReadWrite - non-empty statements, bucket name and prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::testbucket"),
+ }}, BucketPolicyReadWrite, "", "", `[{"Action":["s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::testbucket"],"Sid":""}]`},
+ // BucketPolicyReadWrite - empty statements, non-empty bucket name and prefix.
+ {[]Statement{}, BucketPolicyReadWrite, "mybucket", "", `[{"Action":["s3:GetBucketLocation","s3:ListBucket","s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:GetObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/*"],"Sid":""}]`},
+ // BucketPolicyReadWrite - empty statements, bucket name and non-empty prefix.
+ {[]Statement{}, BucketPolicyReadWrite, "", "hello", `[]`},
+ // BucketPolicyReadWrite - empty statements, non-empty bucket name and non-empty prefix.
+ {[]Statement{}, BucketPolicyReadWrite, "mybucket", "hello", `[{"Action":["s3:GetBucketLocation","s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:ListBucket"],"Condition":{"StringEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:GetObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/hello*"],"Sid":""}]`},
+ // Set readonly.
+ {[]Statement{{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, BucketPolicyReadOnly, "mybucket", "", `[{"Action":["s3:GetBucketLocation","s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/*"],"Sid":""}]`},
+ // Set readonly with prefix.
+ {[]Statement{{
+ Actions: writeOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, BucketPolicyReadOnly, "mybucket", "hello", `[{"Action":["s3:GetBucketLocation"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:ListBucket"],"Condition":{"StringEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/hello*"],"Sid":""}]`},
+ // Set writeonly.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, BucketPolicyWriteOnly, "mybucket", "", `[{"Action":["s3:GetBucketLocation","s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/*"],"Sid":""}]`},
+ // Set writeonly with prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: helloCondMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, BucketPolicyWriteOnly, "mybucket", "hello", `[{"Action":["s3:GetBucketLocation","s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/hello*"],"Sid":""}]`},
+
+ // Set readwrite.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, BucketPolicyReadWrite, "mybucket", "", `[{"Action":["s3:GetBucketLocation","s3:ListBucket","s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:GetObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/*"],"Sid":""}]`},
+ // Set readwrite with prefix.
+ {[]Statement{{
+ Actions: readOnlyBucketActions,
+ Effect: "Allow",
+ Principal: User{AWS: set.CreateStringSet("*")},
+ Conditions: helloCondMap,
+ Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
+ }}, BucketPolicyReadWrite, "mybucket", "hello", `[{"Action":["s3:GetBucketLocation","s3:ListBucketMultipartUploads"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:ListBucket"],"Condition":{"StringEquals":{"s3:prefix":["hello"]}},"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket"],"Sid":""},{"Action":["s3:AbortMultipartUpload","s3:DeleteObject","s3:GetObject","s3:ListMultipartUploadParts","s3:PutObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::mybucket/hello*"],"Sid":""}]`},
+ }
+
+ for _, testCase := range testCases {
+ statements := SetPolicy(testCase.statements, testCase.policy, testCase.bucketName, testCase.prefix)
+ if data, err := json.Marshal(statements); err != nil {
+ t.Fatalf("unable encoding to json, %s", err)
+ } else if string(data) != testCase.expectedResult {
+ t.Fatalf("%+v: expected: %s, got: %s", testCase, testCase.expectedResult, string(data))
+ }
+ }
+}
+
+// Validates bucket policy string.
+func TestIsValidBucketPolicy(t *testing.T) {
+ testCases := []struct {
+ inputPolicy BucketPolicy
+ expectedResult bool
+ }{
+ // valid inputs.
+ {BucketPolicy("none"), true},
+ {BucketPolicy("readonly"), true},
+ {BucketPolicy("readwrite"), true},
+ {BucketPolicy("writeonly"), true},
+ // invalid input.
+ {BucketPolicy("readwriteonly"), false},
+ {BucketPolicy("writeread"), false},
+ }
+
+ for i, testCase := range testCases {
+ actualResult := testCase.inputPolicy.IsValidBucketPolicy()
+ if testCase.expectedResult != actualResult {
+ t.Errorf("Test %d: Expected IsValidBucket policy to be '%v' for policy \"%s\", but instead found it to be '%v'", i+1, testCase.expectedResult, testCase.inputPolicy, actualResult)
+ }
+ }
+}
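Given the four values asserted valid above, one plausible implementation of the validator is a simple switch over the canonical constants (a sketch under that assumption; the actual method lives in bucket-policy.go):

```go
// IsValidBucketPolicy sketch: accept only the four canonical values
// exercised by the test above.
func (p BucketPolicy) IsValidBucketPolicy() bool {
	switch p {
	case BucketPolicyNone, BucketPolicyReadOnly,
		BucketPolicyReadWrite, BucketPolicyWriteOnly:
		return true
	}
	return false
}
```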
+
+// Tests validate Bucket policy resource matcher.
+func TestBucketPolicyResourceMatch(t *testing.T) {
+
+ // generates statement with given resource.
+ generateStatement := func(resource string) Statement {
+ statement := Statement{}
+ statement.Resources = set.CreateStringSet(resource)
+ return statement
+ }
+
+ // generates resource ARN for given bucket and object.
+ generateResource := func(bucketName, objectName string) string {
+ return awsResourcePrefix + bucketName + "/" + objectName
+ }
+
+ testCases := []struct {
+ resourceToMatch string
+ statement Statement
+ expectedResourceMatch bool
+ }{
+ // Test case 1-4.
+ // Policy with resource ending with bucket/* allows access to all objects inside the given bucket.
+ {generateResource("minio-bucket", ""), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/*")), true},
+ {generateResource("minio-bucket", ""), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/*")), true},
+ {generateResource("minio-bucket", ""), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/*")), true},
+ {generateResource("minio-bucket", ""), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/*")), true},
+ // Test case - 5.
+ // Policy with resource ending with bucket/oo* should not allow access to bucket/output.txt.
+ {generateResource("minio-bucket", "output.txt"), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/oo*")), false},
+ // Test case - 6.
+ // Policy with resource ending with bucket/oo* should allow access to bucket/ootput.txt.
+ {generateResource("minio-bucket", "ootput.txt"), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/oo*")), true},
+ // Test case - 7.
+ // Policy with resource ending with bucket/oo* allows access to all subfolders starting with "oo" inside given bucket.
+ {generateResource("minio-bucket", "oop-bucket/my-file"), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/oo*")), true},
+ // Test case - 8.
+ {generateResource("minio-bucket", "Asia/India/1.pjg"), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/Asia/Japan/*")), false},
+ // Test case - 9.
+ {generateResource("minio-bucket", "Asia/India/1.pjg"), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix, "minio-bucket"+"/Asia/Japan/*")), false},
+ // Test case - 10.
+ // Proves that the namespace is flat.
+ {generateResource("minio-bucket", "Africa/Bihar/India/design_info.doc/Bihar"), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix,
+ "minio-bucket"+"/*/India/*/Bihar")), true},
+ // Test case - 11.
+ // Proves that the namespace is flat.
+ {generateResource("minio-bucket", "Asia/China/India/States/Bihar/output.txt"), generateStatement(fmt.Sprintf("%s%s", awsResourcePrefix,
+ "minio-bucket"+"/*/India/*/Bihar/*")), true},
+ }
+ for i, testCase := range testCases {
+ resources := testCase.statement.Resources.FuncMatch(resourceMatch, testCase.resourceToMatch)
+ actualResourceMatch := resources.Equals(testCase.statement.Resources)
+ if testCase.expectedResourceMatch != actualResourceMatch {
+ t.Errorf("Test %d: Expected Resource match to be `%v`, but instead found it to be `%v`", i+1, testCase.expectedResourceMatch, actualResourceMatch)
+ }
+ }
+}
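The resource-match cases above hinge on `*` matching across `/`, i.e. a flat object namespace. resourceMatch itself is defined elsewhere in the package; what follows is a self-contained sketch of such a matcher with illustrative names, not the library's actual implementation, that satisfies the cases exercised by the test:

```go
package main

import "fmt"

// wildcardMatch reports whether s matches pattern, where '*' matches any
// run of characters, including '/', so the object namespace is flat.
// The naive recursion is exponential in the worst case; it is a sketch,
// not minio-go's actual resourceMatch.
func wildcardMatch(pattern, s string) bool {
	if pattern == "" {
		return s == ""
	}
	if pattern[0] == '*' {
		// '*' matches the empty string, or consumes one character of s.
		return wildcardMatch(pattern[1:], s) ||
			(s != "" && wildcardMatch(pattern, s[1:]))
	}
	return s != "" && pattern[0] == s[0] && wildcardMatch(pattern[1:], s[1:])
}

func main() {
	const prefix = "arn:aws:s3:::"
	// Test case 5: bucket/oo* must not match bucket/output.txt.
	fmt.Println(wildcardMatch(prefix+"minio-bucket/oo*",
		prefix+"minio-bucket/output.txt")) // false
	// Test case 10: '*' crosses '/' boundaries, proving the namespace is flat.
	fmt.Println(wildcardMatch(prefix+"minio-bucket/*/India/*/Bihar",
		prefix+"minio-bucket/Africa/Bihar/India/design_info.doc/Bihar")) // true
}
```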
diff --git a/pkg/set/stringset.go b/pkg/set/stringset.go
new file mode 100644
index 0000000..55084d4
--- /dev/null
+++ b/pkg/set/stringset.go
@@ -0,0 +1,196 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package set
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+)
+
+// StringSet - uses map as set of strings.
+type StringSet map[string]struct{}
+
+// keys - returns StringSet keys.
+func (set StringSet) keys() []string {
+ keys := make([]string, 0, len(set))
+ for k := range set {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+// IsEmpty - returns whether the set is empty or not.
+func (set StringSet) IsEmpty() bool {
+ return len(set) == 0
+}
+
+// Add - adds string to the set.
+func (set StringSet) Add(s string) {
+ set[s] = struct{}{}
+}
+
+// Remove - removes string from the set. It does nothing if the string does not exist in the set.
+func (set StringSet) Remove(s string) {
+ delete(set, s)
+}
+
+// Contains - checks if string is in the set.
+func (set StringSet) Contains(s string) bool {
+ _, ok := set[s]
+ return ok
+}
+
+// FuncMatch - returns a new set containing each value that passes the match function.
+// A 'matchFn' should accept a set element as its first argument and
+// 'matchString' as its second argument. It may apply any logic to
+// compare the two arguments and should return true to include the
+// element in the output set; otherwise the element is ignored.
+func (set StringSet) FuncMatch(matchFn func(string, string) bool, matchString string) StringSet {
+ nset := NewStringSet()
+ for k := range set {
+ if matchFn(k, matchString) {
+ nset.Add(k)
+ }
+ }
+ return nset
+}
+
+// ApplyFunc - returns a new set containing each value processed by
+// 'applyFn'. An 'applyFn' receives an element of the set as its
+// argument and returns the processed string; it may apply any
+// transformation logic.
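+//
+// For example (illustrative):
+//
+//	s := CreateStringSet("a", "b")
+//	prefixed := s.ApplyFunc(func(v string) string { return "bucket/" + v })
+//	// prefixed contains "bucket/a" and "bucket/b".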
+func (set StringSet) ApplyFunc(applyFn func(string) string) StringSet {
+ nset := NewStringSet()
+ for k := range set {
+ nset.Add(applyFn(k))
+ }
+ return nset
+}
+
+// Equals - checks whether the given set is equal to the current set.
+func (set StringSet) Equals(sset StringSet) bool {
+	// If the lengths differ, the sets cannot be equal.
+ if len(set) != len(sset) {
+ return false
+ }
+
+	// As both sets have the same length, check that every element of this set is present in the other.
+ for k := range set {
+ if _, ok := sset[k]; !ok {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Intersection - returns the intersection with given set as new set.
+func (set StringSet) Intersection(sset StringSet) StringSet {
+ nset := NewStringSet()
+ for k := range set {
+ if _, ok := sset[k]; ok {
+ nset.Add(k)
+ }
+ }
+
+ return nset
+}
+
+// Difference - returns the difference with given set as new set.
+func (set StringSet) Difference(sset StringSet) StringSet {
+ nset := NewStringSet()
+ for k := range set {
+ if _, ok := sset[k]; !ok {
+ nset.Add(k)
+ }
+ }
+
+ return nset
+}
+
+// Union - returns the union with given set as new set.
+func (set StringSet) Union(sset StringSet) StringSet {
+ nset := NewStringSet()
+ for k := range set {
+ nset.Add(k)
+ }
+
+ for k := range sset {
+ nset.Add(k)
+ }
+
+ return nset
+}
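+
+// As an illustration of the three set operations above:
+//
+//	a := CreateStringSet("x", "y")
+//	b := CreateStringSet("y", "z")
+//	a.Intersection(b) // [y]
+//	a.Difference(b)   // [x]
+//	a.Union(b)        // [x y z]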
+
+// MarshalJSON - converts the set to a sorted JSON string array.
+func (set StringSet) MarshalJSON() ([]byte, error) {
+ return json.Marshal(set.keys())
+}
+
+// UnmarshalJSON - parses JSON data and creates a new set from it.
+// If 'data' contains a JSON string array, the set contains each string.
+// If 'data' contains a JSON string, the set contains that string as its
+// only element. Any other JSON type results in a JSON parse error.
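+//
+// For example (illustrative), both a JSON array and a bare JSON string
+// decode cleanly, which suits policy-style fields that may be either:
+//
+//	var s StringSet
+//	_ = json.Unmarshal([]byte(`["read","write"]`), &s) // [read write]
+//	_ = json.Unmarshal([]byte(`"read"`), &s)           // [read]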
+func (set *StringSet) UnmarshalJSON(data []byte) error {
+ sl := []string{}
+ var err error
+ if err = json.Unmarshal(data, &sl); err == nil {
+ *set = make(StringSet)
+ for _, s := range sl {
+ set.Add(s)
+ }
+ } else {
+ var s string
+ if err = json.Unmarshal(data, &s); err == nil {
+ *set = make(StringSet)
+ set.Add(s)
+ }
+ }
+
+ return err
+}
+
+// String - returns a printable string of the set.
+func (set StringSet) String() string {
+ return fmt.Sprintf("%s", set.keys())
+}
+
+// NewStringSet - creates new string set.
+func NewStringSet() StringSet {
+ return make(StringSet)
+}
+
+// CreateStringSet - creates new string set with given string values.
+func CreateStringSet(sl ...string) StringSet {
+ set := make(StringSet)
+ for _, k := range sl {
+ set.Add(k)
+ }
+ return set
+}
+
+// CopyStringSet - returns copy of given set.
+func CopyStringSet(set StringSet) StringSet {
+ nset := NewStringSet()
+ for k, v := range set {
+ nset[k] = v
+ }
+ return nset
+}
diff --git a/pkg/set/stringset_test.go b/pkg/set/stringset_test.go
new file mode 100644
index 0000000..4b74e70
--- /dev/null
+++ b/pkg/set/stringset_test.go
@@ -0,0 +1,322 @@
+/*
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package set
+
+import (
+ "strings"
+ "testing"
+)
+
+// NewStringSet() is called and the result is validated.
+func TestNewStringSet(t *testing.T) {
+ if ss := NewStringSet(); !ss.IsEmpty() {
+ t.Fatalf("expected: true, got: false")
+ }
+}
+
+// CreateStringSet() is called and the result is validated.
+func TestCreateStringSet(t *testing.T) {
+ ss := CreateStringSet("foo")
+ if str := ss.String(); str != `[foo]` {
+ t.Fatalf("expected: %s, got: %s", `["foo"]`, str)
+ }
+}
+
+// CopyStringSet() is called and the result is validated.
+func TestCopyStringSet(t *testing.T) {
+ ss := CreateStringSet("foo")
+ sscopy := CopyStringSet(ss)
+ if !ss.Equals(sscopy) {
+ t.Fatalf("expected: %s, got: %s", ss, sscopy)
+ }
+}
+
+// StringSet.Add() is called with a series of cases for valid and erroneous inputs and the result is validated.
+func TestStringSetAdd(t *testing.T) {
+ testCases := []struct {
+ value string
+ expectedResult string
+ }{
+ // Test first addition.
+ {"foo", `[foo]`},
+ // Test duplicate addition.
+ {"foo", `[foo]`},
+ // Test new addition.
+ {"bar", `[bar foo]`},
+ }
+
+ ss := NewStringSet()
+ for _, testCase := range testCases {
+ ss.Add(testCase.value)
+ if str := ss.String(); str != testCase.expectedResult {
+ t.Fatalf("expected: %s, got: %s", testCase.expectedResult, str)
+ }
+ }
+}
+
+// StringSet.Remove() is called with a series of cases for valid and erroneous inputs and the result is validated.
+func TestStringSetRemove(t *testing.T) {
+ ss := CreateStringSet("foo", "bar")
+ testCases := []struct {
+ value string
+ expectedResult string
+ }{
+		// Test removing a non-existent item.
+		{"baz", `[bar foo]`},
+		// Test removing an existing item.
+		{"foo", `[bar]`},
+		// Test removing the same item again.
+		{"foo", `[bar]`},
+		// Test removing the last item, leaving the set empty.
+		{"bar", `[]`},
+ }
+
+ for _, testCase := range testCases {
+ ss.Remove(testCase.value)
+ if str := ss.String(); str != testCase.expectedResult {
+ t.Fatalf("expected: %s, got: %s", testCase.expectedResult, str)
+ }
+ }
+}
+
+// StringSet.Contains() is called with a series of cases for valid and erroneous inputs and the result is validated.
+func TestStringSetContains(t *testing.T) {
+ ss := CreateStringSet("foo")
+ testCases := []struct {
+ value string
+ expectedResult bool
+ }{
+		// Test for a non-existent item.
+		{"bar", false},
+		// Test for an existing item.
+		{"foo", true},
+		// Test to verify case sensitivity.
+		{"Foo", false},
+ }
+
+ for _, testCase := range testCases {
+ if result := ss.Contains(testCase.value); result != testCase.expectedResult {
+ t.Fatalf("expected: %t, got: %t", testCase.expectedResult, result)
+ }
+ }
+}
+
+// StringSet.FuncMatch() is called with a series of cases for valid and erroneous inputs and the result is validated.
+func TestStringSetFuncMatch(t *testing.T) {
+ ss := CreateStringSet("foo", "bar")
+ testCases := []struct {
+ matchFn func(string, string) bool
+ value string
+ expectedResult string
+ }{
+		// Test a match function doing a case-insensitive compare.
+ {func(setValue string, compareValue string) bool {
+ return strings.ToUpper(setValue) == strings.ToUpper(compareValue)
+ }, "Bar", `[bar]`},
+		// Test a match function doing a prefix check.
+ {func(setValue string, compareValue string) bool {
+ return strings.HasPrefix(compareValue, setValue)
+ }, "foobar", `[foo]`},
+ }
+
+ for _, testCase := range testCases {
+ s := ss.FuncMatch(testCase.matchFn, testCase.value)
+ if result := s.String(); result != testCase.expectedResult {
+ t.Fatalf("expected: %s, got: %s", testCase.expectedResult, result)
+ }
+ }
+}
+
+// StringSet.ApplyFunc() is called with a series of cases for valid and erroneous inputs and the result is validated.
+func TestStringSetApplyFunc(t *testing.T) {
+ ss := CreateStringSet("foo", "bar")
+ testCases := []struct {
+ applyFn func(string) string
+ expectedResult string
+ }{
+		// Test a function prepending a known string.
+		{func(setValue string) string { return "mybucket/" + setValue }, `[mybucket/bar mybucket/foo]`},
+		// Test a function stripping the first character of each value.
+		{func(setValue string) string { return setValue[1:] }, `[ar oo]`},
+ }
+
+ for _, testCase := range testCases {
+ s := ss.ApplyFunc(testCase.applyFn)
+ if result := s.String(); result != testCase.expectedResult {
+ t.Fatalf("expected: %s, got: %s", testCase.expectedResult, result)
+ }
+ }
+}
+
+// StringSet.Equals() is called with a series of cases for valid and erroneous inputs and the result is validated.
+func TestStringSetEquals(t *testing.T) {
+ testCases := []struct {
+ set1 StringSet
+ set2 StringSet
+ expectedResult bool
+ }{
+		// Test equal sets.
+		{CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar"), true},
+		// Test a second set with more items.
+		{CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar", "baz"), false},
+		// Test a second set with fewer items.
+		{CreateStringSet("foo", "bar"), CreateStringSet("bar"), false},
+ }
+
+ for _, testCase := range testCases {
+ if result := testCase.set1.Equals(testCase.set2); result != testCase.expectedResult {
+ t.Fatalf("expected: %t, got: %t", testCase.expectedResult, result)
+ }
+ }
+}
+
+// StringSet.Intersection() is called with a series of cases for valid and erroneous inputs and the result is validated.
+func TestStringSetIntersection(t *testing.T) {
+ testCases := []struct {
+ set1 StringSet
+ set2 StringSet
+ expectedResult StringSet
+ }{
+		// Test intersection of identical sets.
+		{CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar")},
+		// Test intersection where the second set is a subset.
+		{CreateStringSet("foo", "bar", "baz"), CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar")},
+		// Test intersection of partially overlapping sets.
+		{CreateStringSet("foo", "baz"), CreateStringSet("baz", "bar"), CreateStringSet("baz")},
+		// Test intersection of disjoint sets (empty result).
+		{CreateStringSet("foo", "baz"), CreateStringSet("poo", "bar"), NewStringSet()},
+ }
+
+ for _, testCase := range testCases {
+ if result := testCase.set1.Intersection(testCase.set2); !result.Equals(testCase.expectedResult) {
+ t.Fatalf("expected: %s, got: %s", testCase.expectedResult, result)
+ }
+ }
+}
+
+// StringSet.Difference() is called with a series of cases for valid and erroneous inputs and the result is validated.
+func TestStringSetDifference(t *testing.T) {
+ testCases := []struct {
+ set1 StringSet
+ set2 StringSet
+ expectedResult StringSet
+ }{
+		// Test sets with no difference.
+		{CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar"), NewStringSet()},
+		// Test a value present only in the first set.
+		{CreateStringSet("foo", "bar", "baz"), CreateStringSet("foo", "bar"), CreateStringSet("baz")},
+		// Test differing values in both sets.
+		{CreateStringSet("foo", "baz"), CreateStringSet("baz", "bar"), CreateStringSet("foo")},
+		// Test sets where all values differ.
+		{CreateStringSet("foo", "baz"), CreateStringSet("poo", "bar"), CreateStringSet("foo", "baz")},
+ }
+
+ for _, testCase := range testCases {
+ if result := testCase.set1.Difference(testCase.set2); !result.Equals(testCase.expectedResult) {
+ t.Fatalf("expected: %s, got: %s", testCase.expectedResult, result)
+ }
+ }
+}
+
+// StringSet.Union() is called with a series of cases for valid and erroneous inputs and the result is validated.
+func TestStringSetUnion(t *testing.T) {
+ testCases := []struct {
+ set1 StringSet
+ set2 StringSet
+ expectedResult StringSet
+ }{
+		// Test union of identical sets.
+		{CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar")},
+		// Test union where the second set is a subset.
+		{CreateStringSet("foo", "bar", "baz"), CreateStringSet("foo", "bar"), CreateStringSet("foo", "bar", "baz")},
+		// Test union of partially overlapping sets.
+		{CreateStringSet("foo", "baz"), CreateStringSet("baz", "bar"), CreateStringSet("foo", "baz", "bar")},
+		// Test union of disjoint sets.
+		{CreateStringSet("foo", "baz"), CreateStringSet("poo", "bar"), CreateStringSet("foo", "baz", "poo", "bar")},
+ }
+
+ for _, testCase := range testCases {
+ if result := testCase.set1.Union(testCase.set2); !result.Equals(testCase.expectedResult) {
+ t.Fatalf("expected: %s, got: %s", testCase.expectedResult, result)
+ }
+ }
+}
+
+// StringSet.MarshalJSON() is called with a series of cases for valid and erroneous inputs and the result is validated.
+func TestStringSetMarshalJSON(t *testing.T) {
+ testCases := []struct {
+ set StringSet
+ expectedResult string
+ }{
+ // Test set with values.
+ {CreateStringSet("foo", "bar"), `["bar","foo"]`},
+ // Test empty set.
+ {NewStringSet(), "[]"},
+ }
+
+ for _, testCase := range testCases {
+ if result, _ := testCase.set.MarshalJSON(); string(result) != testCase.expectedResult {
+ t.Fatalf("expected: %s, got: %s", testCase.expectedResult, string(result))
+ }
+ }
+}
+
+// StringSet.UnmarshalJSON() is called with a series of cases for valid and erroneous inputs and the result is validated.
+func TestStringSetUnmarshalJSON(t *testing.T) {
+ testCases := []struct {
+ data []byte
+ expectedResult string
+ }{
+ // Test to convert JSON array to set.
+ {[]byte(`["bar","foo"]`), `[bar foo]`},
+ // Test to convert JSON string to set.
+ {[]byte(`"bar"`), `[bar]`},
+ // Test to convert JSON empty array to set.
+ {[]byte(`[]`), `[]`},
+ // Test to convert JSON empty string to set.
+ {[]byte(`""`), `[]`},
+ }
+
+ for _, testCase := range testCases {
+ var set StringSet
+ set.UnmarshalJSON(testCase.data)
+ if result := set.String(); result != testCase.expectedResult {
+ t.Fatalf("expected: %s, got: %s", testCase.expectedResult, result)
+ }
+ }
+}
+
+// StringSet.String() is called with a series of cases for valid and erroneous inputs and the result is validated.
+func TestStringSetString(t *testing.T) {
+ testCases := []struct {
+ set StringSet
+ expectedResult string
+ }{
+ // Test empty set.
+ {NewStringSet(), `[]`},
+ // Test set with empty value.
+ {CreateStringSet(""), `[]`},
+ // Test set with value.
+ {CreateStringSet("foo"), `[foo]`},
+ }
+
+ for _, testCase := range testCases {
+ if str := testCase.set.String(); str != testCase.expectedResult {
+ t.Fatalf("expected: %s, got: %s", testCase.expectedResult, str)
+ }
+ }
+}
diff --git a/request-signature-v2.go b/request-signature-v2.go
index 3a83c55..b9f2482 100644
--- a/request-signature-v2.go
+++ b/request-signature-v2.go
@@ -257,6 +257,7 @@ func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) {
// have signature-related issues
var resourceList = []string{
"acl",
+ "delete",
"location",
"logging",
"notification",
diff --git a/request-signature-v4.go b/request-signature-v4.go
index dfd11e9..2be3808 100644
--- a/request-signature-v4.go
+++ b/request-signature-v4.go
@@ -113,7 +113,7 @@ func getHashedPayload(req http.Request) string {
hashedPayload := req.Header.Get("X-Amz-Content-Sha256")
if hashedPayload == "" {
// Presign does not have a payload, use S3 recommended value.
- hashedPayload = "UNSIGNED-PAYLOAD"
+ hashedPayload = unsignedPayload
}
return hashedPayload
}
diff --git a/s3-endpoints.go b/s3-endpoints.go
index a46b5e3..3f159bd 100644
--- a/s3-endpoints.go
+++ b/s3-endpoints.go
@@ -24,7 +24,9 @@ var awsS3EndpointMap = map[string]string{
"us-west-1": "s3-us-west-1.amazonaws.com",
"eu-west-1": "s3-eu-west-1.amazonaws.com",
"eu-central-1": "s3-eu-central-1.amazonaws.com",
+ "ap-south-1": "s3-ap-south-1.amazonaws.com",
"ap-southeast-1": "s3-ap-southeast-1.amazonaws.com",
+ "ap-southeast-2": "s3-ap-southeast-2.amazonaws.com",
"ap-northeast-1": "s3-ap-northeast-1.amazonaws.com",
"ap-northeast-2": "s3-ap-northeast-2.amazonaws.com",
"sa-east-1": "s3-sa-east-1.amazonaws.com",
diff --git a/utils.go b/utils.go
index 6b98aa5..2208d36 100644
--- a/utils.go
+++ b/utils.go
@@ -310,7 +310,14 @@ func isValidObjectPrefix(objectPrefix string) error {
return nil
}
-// queryEncode - encodes query values in their URL encoded form.
+// percentEncodeSlash - percent-encodes '/' as "%2F". Expects
+// ASCII-encoded input, such as the output of urlEncodePath.
+func percentEncodeSlash(s string) string {
+ return strings.Replace(s, "/", "%2F", -1)
+}
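+
+// For example (illustrative): percentEncodeSlash("a/b") returns "a%2Fb",
+// so queryEncode(url.Values{"prefix": {"a/b"}}) yields "prefix=a%2Fb".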
+
+// queryEncode - encodes query values in their URL-encoded form. In
+// addition to the percent-encoding performed by urlEncodePath() used
+// here, it also percent-encodes '/' (forward slash).
func queryEncode(v url.Values) string {
if v == nil {
return ""
@@ -323,13 +330,13 @@ func queryEncode(v url.Values) string {
sort.Strings(keys)
for _, k := range keys {
vs := v[k]
- prefix := urlEncodePath(k) + "="
+ prefix := percentEncodeSlash(urlEncodePath(k)) + "="
for _, v := range vs {
if buf.Len() > 0 {
buf.WriteByte('&')
}
buf.WriteString(prefix)
- buf.WriteString(urlEncodePath(v))
+ buf.WriteString(percentEncodeSlash(urlEncodePath(v)))
}
}
return buf.String()
diff --git a/utils_test.go b/utils_test.go
index 801fa31..1a30d54 100644
--- a/utils_test.go
+++ b/utils_test.go
@@ -356,6 +356,31 @@ func TestIsValidBucketName(t *testing.T) {
}
+func TestPercentEncodeSlash(t *testing.T) {
+ testCases := []struct {
+ input string
+ output string
+ }{
+ {"test123", "test123"},
+ {"abc,+_1", "abc,+_1"},
+ {"%40prefix=test%40123", "%40prefix=test%40123"},
+ {"key1=val1/val2", "key1=val1%2Fval2"},
+ {"%40prefix=test%40123/", "%40prefix=test%40123%2F"},
+ }
+
+ for i, testCase := range testCases {
+ receivedOutput := percentEncodeSlash(testCase.input)
+ if testCase.output != receivedOutput {
+ t.Errorf(
+ "Test %d: Input: \"%s\" --> Expected percentEncodeSlash to return \"%s\", but it returned \"%s\" instead!",
+ i+1, testCase.input, testCase.output,
+ receivedOutput,
+ )
+
+ }
+ }
+}
+
// Tests validate the query encoder.
func TestQueryEncode(t *testing.T) {
testCases := []struct {
@@ -366,6 +391,7 @@ func TestQueryEncode(t *testing.T) {
}{
{"prefix", []string{"test@123", "test@456"}, "prefix=test%40123&prefix=test%40456"},
{"@prefix", []string{"test@123"}, "%40prefix=test%40123"},
+ {"@prefix", []string{"a/b/c/"}, "%40prefix=a%2Fb%2Fc%2F"},
{"prefix", []string{"test#123"}, "prefix=test%23123"},
{"prefix#", []string{"test#123"}, "prefix%23=test%23123"},
{"prefix", []string{"test123"}, "prefix=test123"},