From 8f728c4104794b09ee49e918c1318f311d000904 Mon Sep 17 00:00:00 2001 From: Nicolas Juhel Date: Tue, 4 Jul 2017 16:24:59 +0200 Subject: [PATCH] Transform the hard-coded secure mode false in S3client to a config options to allow specify a secure connection as true Bump Godep Lib github.com/minio/minio-go to release v2.1.0 / rev : 2f03abaa07d8bc57faef16cda7655ea62a7e0bed to fix ceph S3 issues Update vendor/github.com/minio/minio-go --- Godeps/Godeps.json | 29 +- plugin/s3/plugin.go | 35 +- vendor/github.com/minio/minio-go/.travis.yml | 9 +- vendor/github.com/minio/minio-go/INSTALLGO.md | 83 --- vendor/github.com/minio/minio-go/README.md | 252 +++++-- .../{api-definitions.go => api-datatypes.go} | 9 +- .../minio/minio-go/api-error-response.go | 136 ++-- .../minio/minio-go/api-get-object-file.go | 11 +- .../minio/minio-go/api-get-object.go | 686 ++++++++++++++++++ .../minio/minio-go/api-get-policy.go | 106 +++ vendor/github.com/minio/minio-go/api-get.go | 537 -------------- vendor/github.com/minio/minio-go/api-list.go | 274 +++++-- .../minio/minio-go/api-notification.go | 225 ++++++ .../minio/minio-go/api-presigned.go | 76 +- .../minio/minio-go/api-put-bucket.go | 287 +++++--- .../minio/minio-go/api-put-object-common.go | 253 +++---- .../minio/minio-go/api-put-object-copy.go | 72 ++ .../minio/minio-go/api-put-object-file.go | 248 ++++--- .../minio-go/api-put-object-multipart.go | 298 +++++--- .../minio/minio-go/api-put-object-progress.go | 142 +++- .../minio/minio-go/api-put-object-readat.go | 261 ++++--- .../minio/minio-go/api-put-object.go | 245 ++++--- .../github.com/minio/minio-go/api-remove.go | 193 ++++- ...-s3-definitions.go => api-s3-datatypes.go} | 111 ++- vendor/github.com/minio/minio-go/api-stat.go | 150 ++-- vendor/github.com/minio/minio-go/api.go | 418 ++++++++--- vendor/github.com/minio/minio-go/appveyor.yml | 5 +- .../github.com/minio/minio-go/bucket-acl.go | 75 -- .../github.com/minio/minio-go/bucket-cache.go | 74 +- 
.../minio/minio-go/bucket-notification.go | 231 ++++++ vendor/github.com/minio/minio-go/constants.go | 24 +- .../minio/minio-go/copy-conditions.go | 99 +++ vendor/github.com/minio/minio-go/core.go | 113 +++ .../github.com/minio/minio-go/hook-reader.go | 20 +- .../minio/minio-go/pkg/encrypt/cbc.go | 284 ++++++++ .../minio/minio-go/pkg/encrypt/interface.go | 50 ++ .../minio/minio-go/pkg/encrypt/keys.go | 165 +++++ .../pkg/policy/bucket-policy-condition.go | 115 +++ .../minio-go/pkg/policy/bucket-policy.go | 634 ++++++++++++++++ .../s3signer/request-signature-streaming.go | 285 ++++++++ .../s3signer}/request-signature-v2.go | 132 ++-- .../s3signer}/request-signature-v4.go | 48 +- .../minio/minio-go/pkg/s3signer/utils.go | 39 + .../minio/minio-go/pkg/s3utils/utils.go | 183 +++++ .../minio/minio-go/pkg/set/stringset.go | 196 +++++ .../github.com/minio/minio-go/post-policy.go | 18 + .../minio/minio-go/request-headers.go | 111 +++ .../minio/minio-go/retry-continous.go | 52 ++ vendor/github.com/minio/minio-go/retry.go | 152 ++++ .../github.com/minio/minio-go/s3-endpoints.go | 7 + vendor/github.com/minio/minio-go/s3-error.go | 60 ++ .../minio/minio-go/signature-type.go | 8 + vendor/github.com/minio/minio-go/utils.go | 183 +---- 53 files changed, 6498 insertions(+), 2011 deletions(-) delete mode 100644 vendor/github.com/minio/minio-go/INSTALLGO.md rename vendor/github.com/minio/minio-go/{api-definitions.go => api-datatypes.go} (93%) create mode 100644 vendor/github.com/minio/minio-go/api-get-object.go create mode 100644 vendor/github.com/minio/minio-go/api-get-policy.go delete mode 100644 vendor/github.com/minio/minio-go/api-get.go create mode 100644 vendor/github.com/minio/minio-go/api-notification.go create mode 100644 vendor/github.com/minio/minio-go/api-put-object-copy.go rename vendor/github.com/minio/minio-go/{api-s3-definitions.go => api-s3-datatypes.go} (62%) delete mode 100644 vendor/github.com/minio/minio-go/bucket-acl.go create mode 100644 
vendor/github.com/minio/minio-go/bucket-notification.go create mode 100644 vendor/github.com/minio/minio-go/copy-conditions.go create mode 100644 vendor/github.com/minio/minio-go/core.go create mode 100644 vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go create mode 100644 vendor/github.com/minio/minio-go/pkg/encrypt/interface.go create mode 100644 vendor/github.com/minio/minio-go/pkg/encrypt/keys.go create mode 100644 vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition.go create mode 100644 vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go create mode 100644 vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go rename vendor/github.com/minio/minio-go/{ => pkg/s3signer}/request-signature-v2.go (69%) rename vendor/github.com/minio/minio-go/{ => pkg/s3signer}/request-signature-v4.go (85%) create mode 100644 vendor/github.com/minio/minio-go/pkg/s3signer/utils.go create mode 100644 vendor/github.com/minio/minio-go/pkg/s3utils/utils.go create mode 100644 vendor/github.com/minio/minio-go/pkg/set/stringset.go create mode 100644 vendor/github.com/minio/minio-go/request-headers.go create mode 100644 vendor/github.com/minio/minio-go/retry-continous.go create mode 100644 vendor/github.com/minio/minio-go/retry.go create mode 100644 vendor/github.com/minio/minio-go/s3-error.go diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 005c21846..36de6eff0 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -212,8 +212,8 @@ }, { "ImportPath": "github.com/minio/minio-go", - "Comment": "v1.0.0", - "Rev": "84b89f6f458668d047b0de2d2cdf596dcf4a79a1" + "Comment": "v2.1.0", + "Rev": "2f03abaa07d8bc57faef16cda7655ea62a7e0bed" }, { "ImportPath": "github.com/onsi/ginkgo", @@ -493,6 +493,31 @@ { "ImportPath": "github.com/starkandwayne/goutils/botta/vendor/golang.org/x/sys/unix", "Rev": "a28afee5ab1f712454bee22cd403f512308e25e2" + }, + { + "ImportPath": "github.com/minio/minio-go/pkg/encrypt", + "Comment": "v2.1.0", + "Rev": 
"2f03abaa07d8bc57faef16cda7655ea62a7e0bed" + }, + { + "ImportPath": "github.com/minio/minio-go/pkg/policy", + "Comment": "v2.1.0", + "Rev": "2f03abaa07d8bc57faef16cda7655ea62a7e0bed" + }, + { + "ImportPath": "github.com/minio/minio-go/pkg/s3signer", + "Comment": "v2.1.0", + "Rev": "2f03abaa07d8bc57faef16cda7655ea62a7e0bed" + }, + { + "ImportPath": "github.com/minio/minio-go/pkg/s3utils", + "Comment": "v2.1.0", + "Rev": "2f03abaa07d8bc57faef16cda7655ea62a7e0bed" + }, + { + "ImportPath": "github.com/minio/minio-go/pkg/set", + "Comment": "v2.1.0", + "Rev": "2f03abaa07d8bc57faef16cda7655ea62a7e0bed" } ] } diff --git a/plugin/s3/plugin.go b/plugin/s3/plugin.go index c52c6d381..a54759275 100644 --- a/plugin/s3/plugin.go +++ b/plugin/s3/plugin.go @@ -23,10 +23,11 @@ // "access_key_id": "your-access-key-id", // "secret_access_key": "your-secret-access-key", // "skip_ssl_validation": false, +// "secure": false, # Define scheme to use http for false, https for true (default : false) // "bucket": "bucket-name", // "prefix": "/path/inside/bucket/to/place/backup/data", // "signature_version": "4", # should be 2 or 4. 
Defaults to 4 -// "socks5_proxy": "" # optionally defined SOCKS5 proxy to use for the s3 communications +// "socks5_proxy": "", # optionally defined SOCKS5 proxy to use for the s3 communications // "s3_port": "" # optionally defined port to use for the s3 communications // } // @@ -35,7 +36,8 @@ // { // "s3_host" : "s3.amazonawd.com", // "signature_version" : "4", -// "skip_ssl_validation" : false +// "skip_ssl_validation" : false, +// "secure" : false // } // // `prefix` will default to the empty string, and backups will be placed in the @@ -90,6 +92,7 @@ const ( DefaultPrefix = "" DefaultSigVersion = "4" DefaultSkipSSLValidation = false + DefaultSecure = false ) func validSigVersion(v string) bool { @@ -114,6 +117,7 @@ func main() { "s3_host" : "s3.amazonaws.com", # override Amazon S3 endpoint "s3_port" : "" # optional port to access s3_host on "skip_ssl_validation" : false, # Skip certificate verification (not recommended) + "secure" : false, # Define scheme to use : true = https / false = http (default false) "prefix" : "/path/in/bucket", # where to store archives, inside the bucket "signature_version" : "4", # AWS signature version; must be '2' or '4' "socks5_proxy" : "" # optional SOCKS5 proxy for accessing S3 @@ -123,7 +127,8 @@ func main() { { "s3_host" : "s3.amazonawd.com", "signature_version" : "4", - "skip_ssl_validation" : false + "skip_ssl_validation" : false, + "secure" : false } `, } @@ -136,6 +141,7 @@ type S3Plugin plugin.PluginInfo type S3ConnectionInfo struct { Host string SkipSSLValidation bool + Secure bool AccessKey string SecretKey string Bucket string @@ -152,6 +158,7 @@ func (p S3Plugin) Meta() plugin.PluginInfo { func (p S3Plugin) Validate(endpoint plugin.ShieldEndpoint) error { var ( s string + tf bool err error fail bool ) @@ -233,7 +240,7 @@ func (p S3Plugin) Validate(endpoint plugin.ShieldEndpoint) error { ansi.Printf("@G{\u2713 socks5_proxy} @C{%s}\n", s) } - tf, err := endpoint.BooleanValueDefault("skip_ssl_validation", 
DefaultSkipSSLValidation) + tf, err = endpoint.BooleanValueDefault("skip_ssl_validation", DefaultSkipSSLValidation) if err != nil { ansi.Printf("@R{\u2717 skip_ssl_validation %s}\n", err) fail = true @@ -243,6 +250,16 @@ func (p S3Plugin) Validate(endpoint plugin.ShieldEndpoint) error { ansi.Printf("@G{\u2713 skip_ssl_validation} @C{no}, SSL @Y{WILL} be validated\n") } + tf, err = endpoint.BooleanValueDefault("secure", DefaultSecure) + if err != nil { + ansi.Printf("@R{\u2717 secure %s}\n", err) + fail = true + } else if tf { + ansi.Printf("@G{\u2713 secure} @C{yes}, S3 Endpoint will be validated in @Y{https} mode\n") + } else { + ansi.Printf("@G{\u2713 secure} @C{no}, S3 Endpoint will be validated in @Y{http} mode\n") + } + if fail { return fmt.Errorf("s3: invalid configuration") } @@ -335,6 +352,11 @@ func getS3ConnInfo(e plugin.ShieldEndpoint) (S3ConnectionInfo, error) { return S3ConnectionInfo{}, err } + mode_ssl, err := e.BooleanValueDefault("secure", DefaultSecure) + if err != nil { + return S3ConnectionInfo{}, err + } + key, err := e.StringValue("access_key_id") if err != nil { return S3ConnectionInfo{}, err } @@ -374,6 +396,7 @@ func getS3ConnInfo(e plugin.ShieldEndpoint) (S3ConnectionInfo, error) { return S3ConnectionInfo{ Host: host, SkipSSLValidation: insecure_ssl, + Secure: mode_ssl, AccessKey: key, SecretKey: secret, Bucket: bucket, @@ -405,9 +428,9 @@ func (s3 S3ConnectionInfo) Connect() (*minio.Client, error) { } if s3.SignatureVersion == "2" { - s3Client, err = minio.NewV2(s3Host, s3.AccessKey, s3.SecretKey, false) + s3Client, err = minio.NewV2(s3Host, s3.AccessKey, s3.SecretKey, s3.Secure) } else { - s3Client, err = minio.NewV4(s3Host, s3.AccessKey, s3.SecretKey, false) + s3Client, err = minio.NewV4(s3Host, s3.AccessKey, s3.SecretKey, s3.Secure) } if err != nil { return nil, err diff --git a/vendor/github.com/minio/minio-go/.travis.yml b/vendor/github.com/minio/minio-go/.travis.yml index 32873865b..066cbe883 100644 --- 
a/vendor/github.com/minio/minio-go/.travis.yml +++ b/vendor/github.com/minio/minio-go/.travis.yml @@ -3,21 +3,18 @@ language: go os: - linux -- osx env: - ARCH=x86_64 - ARCH=i686 go: -- 1.5.2 - 1.5.3 +- 1.6 +- 1.7.4 +- 1.8 script: - diff -au <(gofmt -d .) <(printf "") - go vet ./... - go test -short -race -v ./... - -notifications: - slack: - secure: HrOX2k6F/sEl6Rr4m5vHOdJCIwV42be0kz1Jy/WSMvrl/fQ8YkldKviLeWh4aWt1kclsYhNQ4FqGML+RIZYsdOqej4fAw9Vi5pZkI1MzPJq0UjrtMqkqzvD90eDGQYCKwaXjEIN8cohwJeb6X0B0HKAd9sqJW5GH5SwnhH5WWP8= diff --git a/vendor/github.com/minio/minio-go/INSTALLGO.md b/vendor/github.com/minio/minio-go/INSTALLGO.md deleted file mode 100644 index c3762bbfc..000000000 --- a/vendor/github.com/minio/minio-go/INSTALLGO.md +++ /dev/null @@ -1,83 +0,0 @@ -## Ubuntu (Kylin) 14.04 -### Build Dependencies -This installation guide is based on Ubuntu 14.04+ on x86-64 platform. - -##### Install Git, GCC -```sh -$ sudo apt-get install git build-essential -``` - -##### Install Go 1.5+ - -Download Go 1.5+ from [https://golang.org/dl/](https://golang.org/dl/). - -```sh -$ wget https://storage.googleapis.com/golang/go1.5.1.linux-amd64.tar.gz -$ mkdir -p ${HOME}/bin/ -$ mkdir -p ${HOME}/go/ -$ tar -C ${HOME}/bin/ -xzf go1.5.1.linux-amd64.tar.gz -``` -##### Setup GOROOT and GOPATH - -Add the following exports to your ``~/.bashrc``. Environment variable GOROOT specifies the location of your golang binaries -and GOPATH specifies the location of your project workspace. - -```sh -export GOROOT=${HOME}/bin/go -export GOPATH=${HOME}/go -export PATH=$PATH:${HOME}/bin/go/bin:${GOPATH}/bin -``` -```sh -$ source ~/.bashrc -``` - -##### Testing it all -```sh -$ go env -``` - -## OS X (Yosemite) 10.10 -### Build Dependencies -This installation document assumes OS X Yosemite 10.10+ on x86-64 platform. 
- -##### Install brew -```sh -$ ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" -``` - -##### Install Git, Python -```sh -$ brew install git python -``` - -##### Install Go 1.5+ - -Install golang binaries using `brew` - -```sh -$ brew install go -$ mkdir -p $HOME/go -``` - -##### Setup GOROOT and GOPATH - -Add the following exports to your ``~/.bash_profile``. Environment variable GOROOT specifies the location of your golang binaries -and GOPATH specifies the location of your project workspace. - -```sh -export GOPATH=${HOME}/go -export GOVERSION=$(brew list go | head -n 1 | cut -d '/' -f 6) -export GOROOT=$(brew --prefix)/Cellar/go/${GOVERSION}/libexec -export PATH=$PATH:${GOPATH}/bin -``` - -##### Source the new enviornment - -```sh -$ source ~/.bash_profile -``` - -##### Testing it all -```sh -$ go env -``` diff --git a/vendor/github.com/minio/minio-go/README.md b/vendor/github.com/minio/minio-go/README.md index 967824e1f..4a91dc9c8 100644 --- a/vendor/github.com/minio/minio-go/README.md +++ b/vendor/github.com/minio/minio-go/README.md @@ -1,98 +1,250 @@ -# Minio Go Library for Amazon S3 Compatible Cloud Storage [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +# Minio Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) [![Sourcegraph](https://sourcegraph.com/github.com/minio/minio-go/-/badge.svg)](https://sourcegraph.com/github.com/minio/minio-go?badge) -## Description +The Minio Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage. -Minio Go library is a simple client library for S3 compatible cloud storage servers. Supports AWS Signature Version 4 and 2. AWS Signature Version 4 is chosen as default. +**Supported cloud storage providers:** -List of supported cloud storage providers. 
- - - AWS Signature Version 4 +- AWS Signature Version 4 - Amazon S3 - Minio - - AWS Signature Version 2 +- AWS Signature Version 2 - Google Cloud Storage (Compatibility Mode) - Openstack Swift + Swift3 middleware - Ceph Object Gateway - Riak CS -## Install +This quickstart guide will show you how to install the Minio client SDK, connect to Minio, and provide a walkthrough for a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://docs.minio.io/docs/golang-client-api-reference). -If you do not have a working Golang environment, please follow [Install Golang](./INSTALLGO.md). +This document assumes that you have a working [Go development environment](https://docs.minio.io/docs/how-to-install-golang). +## Download from Github ```sh -$ go get github.com/minio/minio-go +go get -u github.com/minio/minio-go ``` -## Example +## Initialize Minio Client +Minio client requires the following four parameters specified to connect to an Amazon S3 compatible object storage. -### ListBuckets() +| Parameter | Description| +| :--- | :--- | +| endpoint | URL to object storage service. | +| accessKeyID | Access key is the user ID that uniquely identifies your account. | +| secretAccessKey | Secret key is the password to your account. | +| secure | Set this value to 'true' to enable secure (HTTPS) access. | -This example shows how to List your buckets. ```go package main import ( + "github.com/minio/minio-go" "log" +) + +func main() { + endpoint := "play.minio.io:9000" + accessKeyID := "Q3AM3UQ867SPQQA43P2F" + secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" + useSSL := true + // Initialize minio client object. 
+ minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL) + if err != nil { + log.Fatalln(err) + } + + log.Println("%v", minioClient) // minioClient is now setup +``` + +## Quick Start Example - File Uploader +This example program connects to an object storage server, creates a bucket and uploads a file to the bucket. + +We will use the Minio server running at [https://play.minio.io:9000](https://play.minio.io:9000) in this example. Feel free to use this service for testing and development. Access credentials shown in this example are open to the public. + +### FileUploader.go +```go +package main + +import ( "github.com/minio/minio-go" + "log" ) func main() { - // Requests are always secure (HTTPS) by default. Set insecure=true to enable insecure (HTTP) access. - // This boolean value is the last argument for New(). + endpoint := "play.minio.io:9000" + accessKeyID := "Q3AM3UQ867SPQQA43P2F" + secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG" + useSSL := true - // New returns an Amazon S3 compatible client object. API copatibality (v2 or v4) is automatically - // determined based on the Endpoint value. - s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESS-KEY-HERE", "YOUR-SECRET-KEY-HERE", false) + // Initialize minio client object. + minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL) if err != nil { - log.Fatalln(err) + log.Fatalln(err) } - buckets, err := s3Client.ListBuckets() + + // Make a new bucket called mymusic. 
+ bucketName := "mymusic" + location := "us-east-1" + + err = minioClient.MakeBucket(bucketName, location) if err != nil { - log.Fatalln(err) + // Check to see if we already own this bucket (which happens if you run this twice) + exists, err := minioClient.BucketExists(bucketName) + if err == nil && exists { + log.Printf("We already own %s\n", bucketName) + } else { + log.Fatalln(err) + } } - for _, bucket := range buckets { - log.Println(bucket) + log.Printf("Successfully created %s\n", bucketName) + + // Upload the zip file + objectName := "golden-oldies.zip" + filePath := "/tmp/golden-oldies.zip" + contentType := "application/zip" + + // Upload the zip file with FPutObject + n, err := minioClient.FPutObject(bucketName, objectName, filePath, contentType) + if err != nil { + log.Fatalln(err) } + + log.Printf("Successfully uploaded %s of size %d\n", objectName, n) } ``` -## Documentation +### Run FileUploader +```sh +go run file-uploader.go +2016/08/13 17:03:28 Successfully created mymusic +2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413 -### Bucket Operations. -* [MakeBucket(bucketName, BucketACL, location) error](examples/s3/makebucket.go) -* [BucketExists(bucketName) error](examples/s3/bucketexists.go) -* [RemoveBucket(bucketName) error](examples/s3/removebucket.go) -* [GetBucketACL(bucketName) (BucketACL, error)](examples/s3/getbucketacl.go) -* [SetBucketACL(bucketName, BucketACL) error)](examples/s3/setbucketacl.go) -* [ListBuckets() []BucketInfo](examples/s3/listbuckets.go) -* [ListObjects(bucketName, objectPrefix, recursive, chan<- struct{}) <-chan ObjectInfo](examples/s3/listobjects.go) -* [ListIncompleteUploads(bucketName, prefix, recursive, chan<- struct{}) <-chan ObjectMultipartInfo](examples/s3/listincompleteuploads.go) +mc ls play/mymusic/ +[2016-05-27 16:02:16 PDT] 17MiB golden-oldies.zip +``` -### Object Operations. 
-* [PutObject(bucketName, objectName, io.Reader, contentType) error](examples/s3/putobject.go) -* [GetObject(bucketName, objectName) (*Object, error)](examples/s3/getobject.go) -* [StatObject(bucketName, objectName) (ObjectInfo, error)](examples/s3/statobject.go) -* [RemoveObject(bucketName, objectName) error](examples/s3/removeobject.go) -* [RemoveIncompleteUpload(bucketName, objectName) <-chan error](examples/s3/removeincompleteupload.go) +## API Reference +The full API Reference is available here. -### File Object Operations. -* [FPutObject(bucketName, objectName, filePath, contentType) (size, error)](examples/s3/fputobject.go) -* [FGetObject(bucketName, objectName, filePath) error](examples/s3/fgetobject.go) +* [Complete API Reference](https://docs.minio.io/docs/golang-client-api-reference) -### Presigned Operations. -* [PresignedGetObject(bucketName, objectName, time.Duration, url.Values) (string, error)](examples/s3/presignedgetobject.go) -* [PresignedPutObject(bucketName, objectName, time.Duration) (string, error)](examples/s3/presignedputobject.go) -* [PresignedPostPolicy(NewPostPolicy()) (map[string]string, error)](examples/s3/presignedpostpolicy.go) +### API Reference : Bucket Operations -### API Reference +* [`MakeBucket`](https://docs.minio.io/docs/golang-client-api-reference#MakeBucket) +* [`ListBuckets`](https://docs.minio.io/docs/golang-client-api-reference#ListBuckets) +* [`BucketExists`](https://docs.minio.io/docs/golang-client-api-reference#BucketExists) +* [`RemoveBucket`](https://docs.minio.io/docs/golang-client-api-reference#RemoveBucket) +* [`ListObjects`](https://docs.minio.io/docs/golang-client-api-reference#ListObjects) +* [`ListObjectsV2`](https://docs.minio.io/docs/golang-client-api-reference#ListObjectsV2) +* [`ListIncompleteUploads`](https://docs.minio.io/docs/golang-client-api-reference#ListIncompleteUploads) -[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/minio/minio-go) 
+### API Reference : Bucket policy Operations + +* [`SetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketPolicy) +* [`GetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketPolicy) +* [`ListBucketPolicies`](https://docs.minio.io/docs/golang-client-api-reference#ListBucketPolicies) + +### API Reference : Bucket notification Operations + +* [`SetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketNotification) +* [`GetBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketNotification) +* [`RemoveAllBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#RemoveAllBucketNotification) +* [`ListenBucketNotification`](https://docs.minio.io/docs/golang-client-api-reference#ListenBucketNotification) (Minio Extension) + +### API Reference : File Object Operations + +* [`FPutObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject) +* [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject) + +### API Reference : Object Operations + +* [`GetObject`](https://docs.minio.io/docs/golang-client-api-reference#GetObject) +* [`PutObject`](https://docs.minio.io/docs/golang-client-api-reference#PutObject) +* [`PutObjectStreaming`](https://docs.minio.io/docs/golang-client-api-reference#PutObjectStreaming) +* [`StatObject`](https://docs.minio.io/docs/golang-client-api-reference#StatObject) +* [`CopyObject`](https://docs.minio.io/docs/golang-client-api-reference#CopyObject) +* [`RemoveObject`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObject) +* [`RemoveObjects`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObjects) +* [`RemoveIncompleteUpload`](https://docs.minio.io/docs/golang-client-api-reference#RemoveIncompleteUpload) + +### API Reference: Encrypted Object Operations + +* [`GetEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#GetEncryptedObject) 
+* [`PutEncryptedObject`](https://docs.minio.io/docs/golang-client-api-reference#PutEncryptedObject) + +### API Reference : Presigned Operations + +* [`PresignedGetObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedGetObject) +* [`PresignedPutObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPutObject) +* [`PresignedPostPolicy`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPostPolicy) + +### API Reference : Client custom settings +* [`SetAppInfo`](http://docs.minio.io/docs/golang-client-api-reference#SetAppInfo) +* [`SetCustomTransport`](http://docs.minio.io/docs/golang-client-api-reference#SetCustomTransport) +* [`TraceOn`](http://docs.minio.io/docs/golang-client-api-reference#TraceOn) +* [`TraceOff`](http://docs.minio.io/docs/golang-client-api-reference#TraceOff) + + +## Full Examples + +#### Full Examples : Bucket Operations + +* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go) +* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go) +* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go) +* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go) +* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go) +* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go) +* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go) + +#### Full Examples : Bucket policy Operations + +* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go) +* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go) +* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go) + +#### Full Examples : Bucket notification 
Operations + +* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go) +* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go) +* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go) +* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (Minio Extension) + +#### Full Examples : File Object Operations + +* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go) +* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go) + +#### Full Examples : Object Operations + +* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go) +* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go) +* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go) +* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go) +* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go) +* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go) +* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go) + +#### Full Examples : Encrypted Object Operations + +* [put-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/put-encrypted-object.go) +* [get-encrypted-object.go](https://github.com/minio/minio-go/blob/master/examples/s3/get-encrypted-object.go) + +#### Full Examples : Presigned Operations +* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go) +* [presignedputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedputobject.go) 
+* [presignedpostpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedpostpolicy.go) + +## Explore Further +* [Complete Documentation](https://docs.minio.io) +* [Minio Go Client SDK API Reference](https://docs.minio.io/docs/golang-client-api-reference) +* [Go Music Player App Full Application Example](https://docs.minio.io/docs/go-music-player-app) ## Contribute -[Contributors Guide](./CONTRIBUTING.md) +[Contributors Guide](https://github.com/minio/minio-go/blob/master/CONTRIBUTING.md) + +[![Build Status](https://travis-ci.org/minio/minio-go.svg)](https://travis-ci.org/minio/minio-go) +[![Build status](https://ci.appveyor.com/api/projects/status/1d05e6nvxcelmrak?svg=true)](https://ci.appveyor.com/project/harshavardhana/minio-go) -[![Build Status](https://travis-ci.org/minio/minio-go.svg)](https://travis-ci.org/minio/minio-go) [![Build status](https://ci.appveyor.com/api/projects/status/1ep7n2resn6fk1w6?svg=true)](https://ci.appveyor.com/project/harshavardhana/minio-go) diff --git a/vendor/github.com/minio/minio-go/api-definitions.go b/vendor/github.com/minio/minio-go/api-datatypes.go similarity index 93% rename from vendor/github.com/minio/minio-go/api-definitions.go rename to vendor/github.com/minio/minio-go/api-datatypes.go index 0871b1cfb..ab2aa4af2 100644 --- a/vendor/github.com/minio/minio-go/api-definitions.go +++ b/vendor/github.com/minio/minio-go/api-datatypes.go @@ -16,7 +16,10 @@ package minio -import "time" +import ( + "net/http" + "time" +) // BucketInfo container for bucket metadata. type BucketInfo struct { @@ -38,6 +41,10 @@ type ObjectInfo struct { Size int64 `json:"size"` // Size in bytes of the object. ContentType string `json:"contentType"` // A standard MIME type describing the format of the object data. + // Collection of additional metadata on the object. + // eg: x-amz-meta-*, content-encoding etc. + Metadata http.Header `json:"metadata"` + // Owner name. 
Owner struct { DisplayName string `json:"name"` diff --git a/vendor/github.com/minio/minio-go/api-error-response.go b/vendor/github.com/minio/minio-go/api-error-response.go index 647165112..ff8b8b109 100644 --- a/vendor/github.com/minio/minio-go/api-error-response.go +++ b/vendor/github.com/minio/minio-go/api-error-response.go @@ -1,5 +1,5 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016, 2017 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -47,7 +47,10 @@ type ErrorResponse struct { // Region where the bucket is located. This header is returned // only in HEAD bucket and ListObjects response. - AmzBucketRegion string + Region string + + // Headers of the returned S3 XML error + Headers http.Header `xml:"-" json:"-"` } // ToErrorResponse - Returns parsed ErrorResponse struct from body and @@ -72,8 +75,15 @@ func ToErrorResponse(err error) ErrorResponse { } } -// Error - Returns HTTP error string +// Error - Returns S3 error string. func (e ErrorResponse) Error() string { + if e.Message == "" { + msg, ok := s3ErrorResponseMap[e.Code] + if !ok { + msg = fmt.Sprintf("Error response code %s.", e.Code) + } + return msg + } return e.Message } @@ -91,6 +101,7 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) return ErrInvalidArgument(msg) } var errResp ErrorResponse + err := xmlDecoder(resp.Body, &errResp) // Xml decoding failed with no body, fall back to HTTP headers. 
if err != nil { @@ -98,68 +109,75 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string) case http.StatusNotFound: if objectName == "" { errResp = ErrorResponse{ - Code: "NoSuchBucket", - Message: "The specified bucket does not exist.", - BucketName: bucketName, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), + Code: "NoSuchBucket", + Message: "The specified bucket does not exist.", + BucketName: bucketName, } } else { errResp = ErrorResponse{ - Code: "NoSuchKey", - Message: "The specified key does not exist.", - BucketName: bucketName, - Key: objectName, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), + Code: "NoSuchKey", + Message: "The specified key does not exist.", + BucketName: bucketName, + Key: objectName, } } case http.StatusForbidden: errResp = ErrorResponse{ - Code: "AccessDenied", - Message: "Access Denied.", - BucketName: bucketName, - Key: objectName, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), + Code: "AccessDenied", + Message: "Access Denied.", + BucketName: bucketName, + Key: objectName, } case http.StatusConflict: errResp = ErrorResponse{ - Code: "Conflict", - Message: "Bucket not empty.", - BucketName: bucketName, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), + Code: "Conflict", + Message: "Bucket not empty.", + BucketName: bucketName, + } + case http.StatusPreconditionFailed: + errResp = ErrorResponse{ + Code: "PreconditionFailed", + Message: s3ErrorResponseMap["PreconditionFailed"], + BucketName: bucketName, + Key: objectName, } default: errResp = ErrorResponse{ - Code: resp.Status, - Message: resp.Status, - 
BucketName: bucketName, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), + Code: resp.Status, + Message: resp.Status, + BucketName: bucketName, } } } - // AccessDenied without a signature mismatch code, usually means - // that the bucket policy has certain restrictions where some API - // operations are not allowed. Handle this case so that top level - // callers can interpret this easily and fall back if needed to a - // lower functionality call. Read each individual API specific - // code for such fallbacks. - if errResp.Code == "AccessDenied" && errResp.Message == "Access Denied" { - errResp.Code = "NotImplemented" - errResp.Message = "Operation is not allowed according to your bucket policy." + // Save hodID, requestID and region information + // from headers if not available through error XML. + if errResp.RequestID == "" { + errResp.RequestID = resp.Header.Get("x-amz-request-id") + } + if errResp.HostID == "" { + errResp.HostID = resp.Header.Get("x-amz-id-2") } + if errResp.Region == "" { + errResp.Region = resp.Header.Get("x-amz-bucket-region") + } + + // Save headers returned in the API XML error + errResp.Headers = resp.Header + return errResp } +// ErrTransferAccelerationBucket - bucket name is invalid to be used with transfer acceleration. +func ErrTransferAccelerationBucket(bucketName string) error { + msg := fmt.Sprintf("The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods (\".\").") + return ErrorResponse{ + Code: "InvalidArgument", + Message: msg, + BucketName: bucketName, + } +} + // ErrEntityTooLarge - Input size is larger than supported maximum. 
func ErrEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error { msg := fmt.Sprintf("Your proposed upload size ‘%d’ exceeds the maximum allowed object size ‘%d’ for single PUT operation.", totalSize, maxObjectSize) @@ -212,16 +230,6 @@ func ErrInvalidObjectName(message string) error { } } -// ErrInvalidParts - Invalid number of parts. -func ErrInvalidParts(expectedParts, uploadedParts int) error { - msg := fmt.Sprintf("Unexpected number of parts found Want %d, Got %d", expectedParts, uploadedParts) - return ErrorResponse{ - Code: "InvalidParts", - Message: msg, - RequestID: "minio", - } -} - // ErrInvalidObjectPrefix - Invalid object prefix response is // similar to object name response. var ErrInvalidObjectPrefix = ErrInvalidObjectName @@ -234,3 +242,23 @@ func ErrInvalidArgument(message string) error { RequestID: "minio", } } + +// ErrNoSuchBucketPolicy - No Such Bucket Policy response +// The specified bucket does not have a bucket policy. +func ErrNoSuchBucketPolicy(message string) error { + return ErrorResponse{ + Code: "NoSuchBucketPolicy", + Message: message, + RequestID: "minio", + } +} + +// ErrAPINotSupported - API not supported response +// The specified API call is not supported +func ErrAPINotSupported(message string) error { + return ErrorResponse{ + Code: "APINotSupported", + Message: message, + RequestID: "minio", + } +} diff --git a/vendor/github.com/minio/minio-go/api-get-object-file.go b/vendor/github.com/minio/minio-go/api-get-object-file.go index 265a58eea..477a0969f 100644 --- a/vendor/github.com/minio/minio-go/api-get-object-file.go +++ b/vendor/github.com/minio/minio-go/api-get-object-file.go @@ -48,7 +48,7 @@ func (c Client) FGetObject(bucketName, objectName, filePath string) error { } } - // Extract top level direcotry. + // Extract top level directory. objectDir, _ := filepath.Split(filePath) if objectDir != "" { // Create any missing top level directories. 
@@ -78,8 +78,15 @@ func (c Client) FGetObject(bucketName, objectName, filePath string) error { return err } + // Initialize get object request headers to set the + // appropriate range offsets to read from. + reqHeaders := NewGetReqHeaders() + if st.Size() > 0 { + reqHeaders.SetRange(st.Size(), 0) + } + // Seek to current position for incoming reader. - objectReader, objectStat, err := c.getObject(bucketName, objectName, st.Size(), 0) + objectReader, objectStat, err := c.getObject(bucketName, objectName, reqHeaders) if err != nil { return err } diff --git a/vendor/github.com/minio/minio-go/api-get-object.go b/vendor/github.com/minio/minio-go/api-get-object.go new file mode 100644 index 000000000..2abd4608e --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-get-object.go @@ -0,0 +1,686 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016, 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package minio + +import ( + "errors" + "fmt" + "io" + "net/http" + "strings" + "sync" + "time" + + "github.com/minio/minio-go/pkg/encrypt" +) + +// GetEncryptedObject deciphers and streams data stored in the server after applying a specifed encryption materiels +func (c Client) GetEncryptedObject(bucketName, objectName string, encryptMaterials encrypt.Materials) (io.Reader, error) { + + if encryptMaterials == nil { + return nil, ErrInvalidArgument("Unable to recognize empty encryption properties") + } + + // Fetch encrypted object + encReader, err := c.GetObject(bucketName, objectName) + if err != nil { + return nil, err + } + // Stat object to get its encryption metadata + st, err := encReader.Stat() + if err != nil { + return nil, err + } + + // Setup object for decrytion, object is transparently + // decrypted as the consumer starts reading. + encryptMaterials.SetupDecryptMode(encReader, st.Metadata.Get(amzHeaderIV), st.Metadata.Get(amzHeaderKey)) + + // Success. + return encryptMaterials, nil +} + +// GetObject - returns an seekable, readable object. +func (c Client) GetObject(bucketName, objectName string) (*Object, error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return nil, err + } + if err := isValidObjectName(objectName); err != nil { + return nil, err + } + + var httpReader io.ReadCloser + var objectInfo ObjectInfo + var err error + + // Create request channel. + reqCh := make(chan getRequest) + // Create response channel. + resCh := make(chan getResponse) + // Create done channel. + doneCh := make(chan struct{}) + + // This routine feeds partial object data as and when the caller reads. + go func() { + defer close(reqCh) + defer close(resCh) + + // Used to verify if etag of object has changed since last read. + var etag string + + // Loop through the incoming control messages and read data. + for { + select { + // When the done channel is closed exit our routine. 
+ case <-doneCh: + // Close the http response body before returning. + // This ends the connection with the server. + if httpReader != nil { + httpReader.Close() + } + return + + // Gather incoming request. + case req := <-reqCh: + // If this is the first request we may not need to do a getObject request yet. + if req.isFirstReq { + // First request is a Read/ReadAt. + if req.isReadOp { + reqHeaders := NewGetReqHeaders() + // Differentiate between wanting the whole object and just a range. + if req.isReadAt { + // If this is a ReadAt request only get the specified range. + // Range is set with respect to the offset and length of the buffer requested. + // Do not set objectInfo from the first readAt request because it will not get + // the whole object. + reqHeaders.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) + httpReader, objectInfo, err = c.getObject(bucketName, objectName, reqHeaders) + } else { + if req.Offset > 0 { + reqHeaders.SetRange(req.Offset, 0) + } + + // First request is a Read request. + httpReader, objectInfo, err = c.getObject(bucketName, objectName, reqHeaders) + } + if err != nil { + resCh <- getResponse{ + Error: err, + } + return + } + etag = objectInfo.ETag + // Read at least firstReq.Buffer bytes, if not we have + // reached our EOF. + size, err := io.ReadFull(httpReader, req.Buffer) + if err == io.ErrUnexpectedEOF { + // If an EOF happens after reading some but not + // all the bytes ReadFull returns ErrUnexpectedEOF + err = io.EOF + } + // Send back the first response. + resCh <- getResponse{ + objectInfo: objectInfo, + Size: int(size), + Error: err, + didRead: true, + } + } else { + // First request is a Stat or Seek call. + // Only need to run a StatObject until an actual Read or ReadAt request comes through. + objectInfo, err = c.StatObject(bucketName, objectName) + if err != nil { + resCh <- getResponse{ + Error: err, + } + // Exit the go-routine. + return + } + etag = objectInfo.ETag + // Send back the first response. 
+ resCh <- getResponse{ + objectInfo: objectInfo, + } + } + } else if req.settingObjectInfo { // Request is just to get objectInfo. + reqHeaders := NewGetReqHeaders() + if etag != "" { + reqHeaders.SetMatchETag(etag) + } + objectInfo, err := c.statObject(bucketName, objectName, reqHeaders) + if err != nil { + resCh <- getResponse{ + Error: err, + } + // Exit the goroutine. + return + } + // Send back the objectInfo. + resCh <- getResponse{ + objectInfo: objectInfo, + } + } else { + // Offset changes fetch the new object at an Offset. + // Because the httpReader may not be set by the first + // request if it was a stat or seek it must be checked + // if the object has been read or not to only initialize + // new ones when they haven't been already. + // All readAt requests are new requests. + if req.DidOffsetChange || !req.beenRead { + reqHeaders := NewGetReqHeaders() + if etag != "" { + reqHeaders.SetMatchETag(etag) + } + if httpReader != nil { + // Close previously opened http reader. + httpReader.Close() + } + // If this request is a readAt only get the specified range. + if req.isReadAt { + // Range is set with respect to the offset and length of the buffer requested. + reqHeaders.SetRange(req.Offset, req.Offset+int64(len(req.Buffer))-1) + httpReader, _, err = c.getObject(bucketName, objectName, reqHeaders) + } else { + // Range is set with respect to the offset. + if req.Offset > 0 { + reqHeaders.SetRange(req.Offset, 0) + } + + httpReader, objectInfo, err = c.getObject(bucketName, objectName, reqHeaders) + } + if err != nil { + resCh <- getResponse{ + Error: err, + } + return + } + } + + // Read at least req.Buffer bytes, if not we have + // reached our EOF. + size, err := io.ReadFull(httpReader, req.Buffer) + if err == io.ErrUnexpectedEOF { + // If an EOF happens after reading some but not + // all the bytes ReadFull returns ErrUnexpectedEOF + err = io.EOF + } + // Reply back how much was read. 
+ resCh <- getResponse{ + Size: int(size), + Error: err, + didRead: true, + objectInfo: objectInfo, + } + } + } + } + }() + + // Create a newObject through the information sent back by reqCh. + return newObject(reqCh, resCh, doneCh), nil +} + +// get request message container to communicate with internal +// go-routine. +type getRequest struct { + Buffer []byte + Offset int64 // readAt offset. + DidOffsetChange bool // Tracks the offset changes for Seek requests. + beenRead bool // Determines if this is the first time an object is being read. + isReadAt bool // Determines if this request is a request to a specific range + isReadOp bool // Determines if this request is a Read or Read/At request. + isFirstReq bool // Determines if this request is the first time an object is being accessed. + settingObjectInfo bool // Determines if this request is to set the objectInfo of an object. +} + +// get response message container to reply back for the request. +type getResponse struct { + Size int + Error error + didRead bool // Lets subsequent calls know whether or not httpReader has been initiated. + objectInfo ObjectInfo // Used for the first request. +} + +// Object represents an open object. It implements +// Reader, ReaderAt, Seeker, Closer for a HTTP stream. +type Object struct { + // Mutex. + mutex *sync.Mutex + + // User allocated and defined. + reqCh chan<- getRequest + resCh <-chan getResponse + doneCh chan<- struct{} + currOffset int64 + objectInfo ObjectInfo + + // Ask lower level to initiate data fetching based on currOffset + seekData bool + + // Keeps track of closed call. + isClosed bool + + // Keeps track of if this is the first call. + isStarted bool + + // Previous error saved for future calls. + prevErr error + + // Keeps track of if this object has been read yet. + beenRead bool + + // Keeps track of if objectInfo has been set yet. + objectInfoSet bool +} + +// doGetRequest - sends and blocks on the firstReqCh and reqCh of an object. 
+// Returns back the size of the buffer read, if anything was read, as well +// as any error encountered. For all first requests sent on the object +// it is also responsible for sending back the objectInfo. +func (o *Object) doGetRequest(request getRequest) (getResponse, error) { + o.reqCh <- request + response := <-o.resCh + + // Return any error to the top level. + if response.Error != nil { + return response, response.Error + } + + // This was the first request. + if !o.isStarted { + // The object has been operated on. + o.isStarted = true + } + // Set the objectInfo if the request was not readAt + // and it hasn't been set before. + if !o.objectInfoSet && !request.isReadAt { + o.objectInfo = response.objectInfo + o.objectInfoSet = true + } + // Set beenRead only if it has not been set before. + if !o.beenRead { + o.beenRead = response.didRead + } + // Data are ready on the wire, no need to reinitiate connection in lower level + o.seekData = false + + return response, nil +} + +// setOffset - handles the setting of offsets for +// Read/ReadAt/Seek requests. +func (o *Object) setOffset(bytesRead int64) error { + // Update the currentOffset. + o.currOffset += bytesRead + + if o.currOffset >= o.objectInfo.Size { + return io.EOF + } + return nil +} + +// Read reads up to len(b) bytes into b. It returns the number of +// bytes read (0 <= n <= len(p)) and any error encountered. Returns +// io.EOF upon end of file. +func (o *Object) Read(b []byte) (n int, err error) { + if o == nil { + return 0, ErrInvalidArgument("Object is nil") + } + + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + // prevErr is previous error saved from previous operation. + if o.prevErr != nil || o.isClosed { + return 0, o.prevErr + } + // Create a new request. + readReq := getRequest{ + isReadOp: true, + beenRead: o.beenRead, + Buffer: b, + } + + // Alert that this is the first request. 
+ if !o.isStarted { + readReq.isFirstReq = true + } + + // Ask to establish a new data fetch routine based on seekData flag + readReq.DidOffsetChange = o.seekData + readReq.Offset = o.currOffset + + // Send and receive from the first request. + response, err := o.doGetRequest(readReq) + if err != nil && err != io.EOF { + // Save the error for future calls. + o.prevErr = err + return response.Size, err + } + + // Bytes read. + bytesRead := int64(response.Size) + + // Set the new offset. + oerr := o.setOffset(bytesRead) + if oerr != nil { + // Save the error for future calls. + o.prevErr = oerr + return response.Size, oerr + } + + // Return the response. + return response.Size, err +} + +// Stat returns the ObjectInfo structure describing Object. +func (o *Object) Stat() (ObjectInfo, error) { + if o == nil { + return ObjectInfo{}, ErrInvalidArgument("Object is nil") + } + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed { + return ObjectInfo{}, o.prevErr + } + + // This is the first request. + if !o.isStarted || !o.objectInfoSet { + statReq := getRequest{ + isFirstReq: !o.isStarted, + settingObjectInfo: !o.objectInfoSet, + } + + // Send the request and get the response. + _, err := o.doGetRequest(statReq) + if err != nil { + o.prevErr = err + return ObjectInfo{}, err + } + } + + return o.objectInfo, nil +} + +// ReadAt reads len(b) bytes from the File starting at byte offset +// off. It returns the number of bytes read and the error, if any. +// ReadAt always returns a non-nil error when n < len(b). At end of +// file, that error is io.EOF. +func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) { + if o == nil { + return 0, ErrInvalidArgument("Object is nil") + } + + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + // prevErr is error which was saved in previous operation. 
+ if o.prevErr != nil || o.isClosed { + return 0, o.prevErr + } + + // Can only compare offsets to size when size has been set. + if o.objectInfoSet { + // If offset is negative than we return io.EOF. + // If offset is greater than or equal to object size we return io.EOF. + if offset >= o.objectInfo.Size || offset < 0 { + return 0, io.EOF + } + } + + // Create the new readAt request. + readAtReq := getRequest{ + isReadOp: true, + isReadAt: true, + DidOffsetChange: true, // Offset always changes. + beenRead: o.beenRead, // Set if this is the first request to try and read. + Offset: offset, // Set the offset. + Buffer: b, + } + + // Alert that this is the first request. + if !o.isStarted { + readAtReq.isFirstReq = true + } + + // Send and receive from the first request. + response, err := o.doGetRequest(readAtReq) + if err != nil && err != io.EOF { + // Save the error. + o.prevErr = err + return response.Size, err + } + // Bytes read. + bytesRead := int64(response.Size) + // There is no valid objectInfo yet + // to compare against for EOF. + if !o.objectInfoSet { + // Update the currentOffset. + o.currOffset += bytesRead + } else { + // If this was not the first request update + // the offsets and compare against objectInfo + // for EOF. + oerr := o.setOffset(bytesRead) + if oerr != nil { + o.prevErr = oerr + return response.Size, oerr + } + } + return response.Size, err +} + +// Seek sets the offset for the next Read or Write to offset, +// interpreted according to whence: 0 means relative to the +// origin of the file, 1 means relative to the current offset, +// and 2 means relative to the end. +// Seek returns the new offset and an error, if any. +// +// Seeking to a negative offset is an error. Seeking to any positive +// offset is legal, subsequent io operations succeed until the +// underlying object is not closed. 
+func (o *Object) Seek(offset int64, whence int) (n int64, err error) { + if o == nil { + return 0, ErrInvalidArgument("Object is nil") + } + + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + if o.prevErr != nil { + // At EOF seeking is legal allow only io.EOF, for any other errors we return. + if o.prevErr != io.EOF { + return 0, o.prevErr + } + } + + // Negative offset is valid for whence of '2'. + if offset < 0 && whence != 2 { + return 0, ErrInvalidArgument(fmt.Sprintf("Negative position not allowed for %d.", whence)) + } + + // This is the first request. So before anything else + // get the ObjectInfo. + if !o.isStarted || !o.objectInfoSet { + // Create the new Seek request. + seekReq := getRequest{ + isReadOp: false, + Offset: offset, + isFirstReq: true, + } + // Send and receive from the seek request. + _, err := o.doGetRequest(seekReq) + if err != nil { + // Save the error. + o.prevErr = err + return 0, err + } + } + + // Switch through whence. + switch whence { + default: + return 0, ErrInvalidArgument(fmt.Sprintf("Invalid whence %d", whence)) + case 0: + if offset > o.objectInfo.Size { + return 0, io.EOF + } + o.currOffset = offset + case 1: + if o.currOffset+offset > o.objectInfo.Size { + return 0, io.EOF + } + o.currOffset += offset + case 2: + // Seeking to positive offset is valid for whence '2', but + // since we are backing a Reader we have reached 'EOF' if + // offset is positive. + if offset > 0 { + return 0, io.EOF + } + // Seeking to negative position not allowed for whence. + if o.objectInfo.Size+offset < 0 { + return 0, ErrInvalidArgument(fmt.Sprintf("Seeking at negative offset not allowed for %d", whence)) + } + o.currOffset = o.objectInfo.Size + offset + } + // Reset the saved error since we successfully seeked, let the Read + // and ReadAt decide. + if o.prevErr == io.EOF { + o.prevErr = nil + } + + // Ask lower level to fetch again from source + o.seekData = true + + // Return the effective offset. 
+ return o.currOffset, nil +} + +// Close - The behavior of Close after the first call returns error +// for subsequent Close() calls. +func (o *Object) Close() (err error) { + if o == nil { + return ErrInvalidArgument("Object is nil") + } + // Locking. + o.mutex.Lock() + defer o.mutex.Unlock() + + // if already closed return an error. + if o.isClosed { + return o.prevErr + } + + // Close successfully. + close(o.doneCh) + + // Save for future operations. + errMsg := "Object is already closed. Bad file descriptor." + o.prevErr = errors.New(errMsg) + // Save here that we closed done channel successfully. + o.isClosed = true + return nil +} + +// newObject instantiates a new *minio.Object* +// ObjectInfo will be set by setObjectInfo +func newObject(reqCh chan<- getRequest, resCh <-chan getResponse, doneCh chan<- struct{}) *Object { + return &Object{ + mutex: &sync.Mutex{}, + reqCh: reqCh, + resCh: resCh, + doneCh: doneCh, + } +} + +// getObject - retrieve object from Object Storage. +// +// Additionally this function also takes range arguments to download the specified +// range bytes of an object. Setting offset and length = 0 will download the full object. +// +// For more information about the HTTP Range header. +// go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. +func (c Client) getObject(bucketName, objectName string, reqHeaders RequestHeaders) (io.ReadCloser, ObjectInfo, error) { + // Validate input arguments. + if err := isValidBucketName(bucketName); err != nil { + return nil, ObjectInfo{}, err + } + if err := isValidObjectName(objectName); err != nil { + return nil, ObjectInfo{}, err + } + + // Set all the necessary reqHeaders. + customHeader := make(http.Header) + for key, value := range reqHeaders.Header { + customHeader[key] = value + } + + // Execute GET on objectName. 
+ resp, err := c.executeMethod("GET", requestMetadata{ + bucketName: bucketName, + objectName: objectName, + customHeader: customHeader, + contentSHA256Bytes: emptySHA256, + }) + if err != nil { + return nil, ObjectInfo{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { + return nil, ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) + } + } + + // Trim off the odd double quotes from ETag in the beginning and end. + md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"") + md5sum = strings.TrimSuffix(md5sum, "\"") + + // Parse the date. + date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified")) + if err != nil { + msg := "Last-Modified time format not recognized. " + reportIssue + return nil, ObjectInfo{}, ErrorResponse{ + Code: "InternalError", + Message: msg, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + Region: resp.Header.Get("x-amz-bucket-region"), + } + } + + // Get content-type. + contentType := strings.TrimSpace(resp.Header.Get("Content-Type")) + if contentType == "" { + contentType = "application/octet-stream" + } + var objectStat ObjectInfo + objectStat.ETag = md5sum + objectStat.Key = objectName + objectStat.Size = resp.ContentLength + objectStat.LastModified = date + objectStat.ContentType = contentType + + // do not close body here, caller will close + return resp.Body, objectStat, nil +} diff --git a/vendor/github.com/minio/minio-go/api-get-policy.go b/vendor/github.com/minio/minio-go/api-get-policy.go new file mode 100644 index 000000000..7491df330 --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-get-policy.go @@ -0,0 +1,106 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + + "github.com/minio/minio-go/pkg/policy" +) + +// GetBucketPolicy - get bucket policy at a given path. +func (c Client) GetBucketPolicy(bucketName, objectPrefix string) (bucketPolicy policy.BucketPolicy, err error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return policy.BucketPolicyNone, err + } + if err := isValidObjectPrefix(objectPrefix); err != nil { + return policy.BucketPolicyNone, err + } + policyInfo, err := c.getBucketPolicy(bucketName) + if err != nil { + errResponse := ToErrorResponse(err) + if errResponse.Code == "NoSuchBucketPolicy" { + return policy.BucketPolicyNone, nil + } + return policy.BucketPolicyNone, err + } + return policy.GetPolicy(policyInfo.Statements, bucketName, objectPrefix), nil +} + +// ListBucketPolicies - list all policies for a given prefix and all its children. +func (c Client) ListBucketPolicies(bucketName, objectPrefix string) (bucketPolicies map[string]policy.BucketPolicy, err error) { + // Input validation. 
+ if err := isValidBucketName(bucketName); err != nil { + return map[string]policy.BucketPolicy{}, err + } + if err := isValidObjectPrefix(objectPrefix); err != nil { + return map[string]policy.BucketPolicy{}, err + } + policyInfo, err := c.getBucketPolicy(bucketName) + if err != nil { + errResponse := ToErrorResponse(err) + if errResponse.Code == "NoSuchBucketPolicy" { + return map[string]policy.BucketPolicy{}, nil + } + return map[string]policy.BucketPolicy{}, err + } + return policy.GetPolicies(policyInfo.Statements, bucketName), nil +} + +// Default empty bucket access policy. +var emptyBucketAccessPolicy = policy.BucketAccessPolicy{ + Version: "2012-10-17", +} + +// Request server for current bucket policy. +func (c Client) getBucketPolicy(bucketName string) (policy.BucketAccessPolicy, error) { + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("policy", "") + + // Execute GET on bucket to list objects. + resp, err := c.executeMethod("GET", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Bytes: emptySHA256, + }) + + defer closeResponse(resp) + if err != nil { + return emptyBucketAccessPolicy, err + } + + if resp != nil { + if resp.StatusCode != http.StatusOK { + return emptyBucketAccessPolicy, httpRespToErrorResponse(resp, bucketName, "") + } + } + + bucketPolicyBuf, err := ioutil.ReadAll(resp.Body) + if err != nil { + return emptyBucketAccessPolicy, err + } + + policy := policy.BucketAccessPolicy{} + err = json.Unmarshal(bucketPolicyBuf, &policy) + return policy, err +} diff --git a/vendor/github.com/minio/minio-go/api-get.go b/vendor/github.com/minio/minio-go/api-get.go deleted file mode 100644 index 56d44c9f5..000000000 --- a/vendor/github.com/minio/minio-go/api-get.go +++ /dev/null @@ -1,537 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -import ( - "errors" - "fmt" - "io" - "math" - "net/http" - "net/url" - "strings" - "sync" - "time" -) - -// GetBucketACL - Get the permissions on an existing bucket. -// -// Returned values are: -// -// private - Owner gets full access. -// public-read - Owner gets full access, others get read access. -// public-read-write - Owner gets full access, others get full access -// too. -// authenticated-read - Owner gets full access, authenticated users -// get read access. -func (c Client) GetBucketACL(bucketName string) (BucketACL, error) { - // Input validation. - if err := isValidBucketName(bucketName); err != nil { - return "", err - } - - // Set acl query. - urlValues := make(url.Values) - urlValues.Set("acl", "") - - // Instantiate a new request. - req, err := c.newRequest("GET", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - }) - if err != nil { - return "", err - } - - // Initiate the request. - resp, err := c.do(req) - defer closeResponse(resp) - if err != nil { - return "", err - } - if resp != nil { - if resp.StatusCode != http.StatusOK { - return "", httpRespToErrorResponse(resp, bucketName, "") - } - } - - // Decode access control policy. - policy := accessControlPolicy{} - err = xmlDecoder(resp.Body, &policy) - if err != nil { - return "", err - } - - // We need to avoid following de-serialization check for Google - // Cloud Storage. 
On Google Cloud Storage "private" canned ACL's - // policy do not have grant list. Treat it as a valid case, check - // for all other vendors. - if !isGoogleEndpoint(c.endpointURL) { - if policy.AccessControlList.Grant == nil { - errorResponse := ErrorResponse{ - Code: "InternalError", - Message: "Access control Grant list is empty. " + reportIssue, - BucketName: bucketName, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), - } - return "", errorResponse - } - } - - // Boolean cues to indentify right canned acls. - var publicRead, publicWrite, authenticatedRead bool - - // Handle grants. - grants := policy.AccessControlList.Grant - for _, g := range grants { - if g.Grantee.URI == "" && g.Permission == "FULL_CONTROL" { - continue - } - if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers" && g.Permission == "READ" { - authenticatedRead = true - break - } else if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "WRITE" { - publicWrite = true - } else if g.Grantee.URI == "http://acs.amazonaws.com/groups/global/AllUsers" && g.Permission == "READ" { - publicRead = true - } - } - - // Verify if acl is authenticated read. - if authenticatedRead { - return BucketACL("authenticated-read"), nil - } - // Verify if acl is private. - if !publicWrite && !publicRead { - return BucketACL("private"), nil - } - // Verify if acl is public-read. - if !publicWrite && publicRead { - return BucketACL("public-read"), nil - } - // Verify if acl is public-read-write. - if publicRead && publicWrite { - return BucketACL("public-read-write"), nil - } - - return "", ErrorResponse{ - Code: "NoSuchBucketPolicy", - Message: "The specified bucket does not have a bucket policy.", - BucketName: bucketName, - RequestID: "minio", - } -} - -// GetObject - returns an seekable, readable object. 
-func (c Client) GetObject(bucketName, objectName string) (*Object, error) { - // Input validation. - if err := isValidBucketName(bucketName); err != nil { - return nil, err - } - if err := isValidObjectName(objectName); err != nil { - return nil, err - } - // Send an explicit info to get the actual object size. - objectInfo, err := c.StatObject(bucketName, objectName) - if err != nil { - return nil, err - } - - // Create request channel. - reqCh := make(chan readRequest) - // Create response channel. - resCh := make(chan readResponse) - // Create done channel. - doneCh := make(chan struct{}) - - // This routine feeds partial object data as and when the caller - // reads. - go func() { - defer close(reqCh) - defer close(resCh) - - // Loop through the incoming control messages and read data. - for { - select { - // When the done channel is closed exit our routine. - case <-doneCh: - return - // Request message. - case req := <-reqCh: - // Get shortest length. - // NOTE: Last remaining bytes are usually smaller than - // req.Buffer size. Use that as the final length. - length := math.Min(float64(len(req.Buffer)), float64(objectInfo.Size-req.Offset)) - httpReader, _, err := c.getObject(bucketName, objectName, req.Offset, int64(length)) - if err != nil { - resCh <- readResponse{ - Error: err, - } - return - } - size, err := io.ReadFull(httpReader, req.Buffer) - if err == io.ErrUnexpectedEOF { - // If an EOF happens after reading some but not - // all the bytes ReadFull returns ErrUnexpectedEOF - err = io.EOF - } - resCh <- readResponse{ - Size: int(size), - Error: err, - } - } - } - }() - // Return the readerAt backed by routine. - return newObject(reqCh, resCh, doneCh, objectInfo), nil -} - -// Read response message container to reply back for the request. -type readResponse struct { - Size int - Error error -} - -// Read request message container to communicate with internal -// go-routine. -type readRequest struct { - Buffer []byte - Offset int64 // readAt offset. 
-} - -// Object represents an open object. It implements Read, ReadAt, -// Seeker, Close for a HTTP stream. -type Object struct { - // Mutex. - mutex *sync.Mutex - - // User allocated and defined. - reqCh chan<- readRequest - resCh <-chan readResponse - doneCh chan<- struct{} - currOffset int64 - objectInfo ObjectInfo - - // Keeps track of closed call. - isClosed bool - - // Previous error saved for future calls. - prevErr error -} - -// Read reads up to len(p) bytes into p. It returns the number of -// bytes read (0 <= n <= len(p)) and any error encountered. Returns -// io.EOF upon end of file. -func (o *Object) Read(b []byte) (n int, err error) { - if o == nil { - return 0, ErrInvalidArgument("Object is nil") - } - - // Locking. - o.mutex.Lock() - defer o.mutex.Unlock() - - // Previous prevErr is which was saved in previous operation. - if o.prevErr != nil || o.isClosed { - return 0, o.prevErr - } - - // If current offset has reached Size limit, return EOF. - if o.currOffset >= o.objectInfo.Size { - return 0, io.EOF - } - - // Send current information over control channel to indicate we - // are ready. - reqMsg := readRequest{} - - // Send the offset and pointer to the buffer over the channel. - reqMsg.Buffer = b - reqMsg.Offset = o.currOffset - - // Send read request over the control channel. - o.reqCh <- reqMsg - - // Get data over the response channel. - dataMsg := <-o.resCh - - // Bytes read. - bytesRead := int64(dataMsg.Size) - - // Update current offset. - o.currOffset += bytesRead - - if dataMsg.Error == nil { - // If currOffset read is equal to objectSize - // We have reached end of file, we return io.EOF. - if o.currOffset >= o.objectInfo.Size { - return dataMsg.Size, io.EOF - } - return dataMsg.Size, nil - } - - // Save any error. - o.prevErr = dataMsg.Error - return dataMsg.Size, dataMsg.Error -} - -// Stat returns the ObjectInfo structure describing object. 
-func (o *Object) Stat() (ObjectInfo, error) { - if o == nil { - return ObjectInfo{}, ErrInvalidArgument("Object is nil") - } - // Locking. - o.mutex.Lock() - defer o.mutex.Unlock() - - if o.prevErr != nil || o.isClosed { - return ObjectInfo{}, o.prevErr - } - - return o.objectInfo, nil -} - -// ReadAt reads len(b) bytes from the File starting at byte offset -// off. It returns the number of bytes read and the error, if any. -// ReadAt always returns a non-nil error when n < len(b). At end of -// file, that error is io.EOF. -func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) { - if o == nil { - return 0, ErrInvalidArgument("Object is nil") - } - - // Locking. - o.mutex.Lock() - defer o.mutex.Unlock() - - // prevErr is which was saved in previous operation. - if o.prevErr != nil || o.isClosed { - return 0, o.prevErr - } - - // If offset is negative and offset is greater than or equal to - // object size we return EOF. - if offset < 0 || offset >= o.objectInfo.Size { - return 0, io.EOF - } - - // Send current information over control channel to indicate we - // are ready. - reqMsg := readRequest{} - - // Send the offset and pointer to the buffer over the channel. - reqMsg.Buffer = b - reqMsg.Offset = offset - - // Send read request over the control channel. - o.reqCh <- reqMsg - - // Get data over the response channel. - dataMsg := <-o.resCh - - // Bytes read. - bytesRead := int64(dataMsg.Size) - - if dataMsg.Error == nil { - // If offset+bytes read is equal to objectSize - // we have reached end of file, we return io.EOF. - if offset+bytesRead == o.objectInfo.Size { - return dataMsg.Size, io.EOF - } - return dataMsg.Size, nil - } - - // Save any error. - o.prevErr = dataMsg.Error - return dataMsg.Size, dataMsg.Error -} - -// Seek sets the offset for the next Read or Write to offset, -// interpreted according to whence: 0 means relative to the -// origin of the file, 1 means relative to the current offset, -// and 2 means relative to the end. 
-// Seek returns the new offset and an error, if any. -// -// Seeking to a negative offset is an error. Seeking to any positive -// offset is legal, subsequent io operations succeed until the -// underlying object is not closed. -func (o *Object) Seek(offset int64, whence int) (n int64, err error) { - if o == nil { - return 0, ErrInvalidArgument("Object is nil") - } - - // Locking. - o.mutex.Lock() - defer o.mutex.Unlock() - - if o.prevErr != nil { - // At EOF seeking is legal, for any other errors we return. - if o.prevErr != io.EOF { - return 0, o.prevErr - } - } - - // Negative offset is valid for whence of '2'. - if offset < 0 && whence != 2 { - return 0, ErrInvalidArgument(fmt.Sprintf("Negative position not allowed for %d.", whence)) - } - switch whence { - default: - return 0, ErrInvalidArgument(fmt.Sprintf("Invalid whence %d", whence)) - case 0: - if offset > o.objectInfo.Size { - return 0, io.EOF - } - o.currOffset = offset - case 1: - if o.currOffset+offset > o.objectInfo.Size { - return 0, io.EOF - } - o.currOffset += offset - case 2: - // Seeking to positive offset is valid for whence '2', but - // since we are backing a Reader we have reached 'EOF' if - // offset is positive. - if offset > 0 { - return 0, io.EOF - } - // Seeking to negative position not allowed for whence. - if o.objectInfo.Size+offset < 0 { - return 0, ErrInvalidArgument(fmt.Sprintf("Seeking at negative offset not allowed for %d", whence)) - } - o.currOffset += offset - } - // Return the effective offset. - return o.currOffset, nil -} - -// Close - The behavior of Close after the first call returns error -// for subsequent Close() calls. -func (o *Object) Close() (err error) { - if o == nil { - return ErrInvalidArgument("Object is nil") - } - // Locking. - o.mutex.Lock() - defer o.mutex.Unlock() - - // if already closed return an error. - if o.isClosed { - return o.prevErr - } - - // Close successfully. - close(o.doneCh) - - // Save for future operations. 
- errMsg := "Object is already closed. Bad file descriptor." - o.prevErr = errors.New(errMsg) - // Save here that we closed done channel successfully. - o.isClosed = true - return nil -} - -// newObject instantiates a new *minio.Object* -func newObject(reqCh chan<- readRequest, resCh <-chan readResponse, doneCh chan<- struct{}, objectInfo ObjectInfo) *Object { - return &Object{ - mutex: &sync.Mutex{}, - reqCh: reqCh, - resCh: resCh, - doneCh: doneCh, - objectInfo: objectInfo, - } -} - -// getObject - retrieve object from Object Storage. -// -// Additionally this function also takes range arguments to download the specified -// range bytes of an object. Setting offset and length = 0 will download the full object. -// -// For more information about the HTTP Range header. -// go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. -func (c Client) getObject(bucketName, objectName string, offset, length int64) (io.ReadCloser, ObjectInfo, error) { - // Validate input arguments. - if err := isValidBucketName(bucketName); err != nil { - return nil, ObjectInfo{}, err - } - if err := isValidObjectName(objectName); err != nil { - return nil, ObjectInfo{}, err - } - - customHeader := make(http.Header) - // Set ranges if length and offset are valid. - if length > 0 && offset >= 0 { - customHeader.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1)) - } else if offset > 0 && length == 0 { - customHeader.Set("Range", fmt.Sprintf("bytes=%d-", offset)) - } else if length < 0 && offset == 0 { - customHeader.Set("Range", fmt.Sprintf("bytes=%d", length)) - } - - // Instantiate a new request. - req, err := c.newRequest("GET", requestMetadata{ - bucketName: bucketName, - objectName: objectName, - customHeader: customHeader, - }) - if err != nil { - return nil, ObjectInfo{}, err - } - // Execute the request. 
- resp, err := c.do(req) - if err != nil { - return nil, ObjectInfo{}, err - } - if resp != nil { - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent { - return nil, ObjectInfo{}, httpRespToErrorResponse(resp, bucketName, objectName) - } - } - - // Trim off the odd double quotes from ETag in the beginning and end. - md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"") - md5sum = strings.TrimSuffix(md5sum, "\"") - - // Parse the date. - date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified")) - if err != nil { - msg := "Last-Modified time format not recognized. " + reportIssue - return nil, ObjectInfo{}, ErrorResponse{ - Code: "InternalError", - Message: msg, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), - } - } - // Get content-type. - contentType := strings.TrimSpace(resp.Header.Get("Content-Type")) - if contentType == "" { - contentType = "application/octet-stream" - } - var objectStat ObjectInfo - objectStat.ETag = md5sum - objectStat.Key = objectName - objectStat.Size = resp.ContentLength - objectStat.LastModified = date - objectStat.ContentType = contentType - - // do not close body here, caller will close - return resp.Body, objectStat, nil -} diff --git a/vendor/github.com/minio/minio-go/api-list.go b/vendor/github.com/minio/minio-go/api-list.go index c7cbac7d1..6a228179e 100644 --- a/vendor/github.com/minio/minio-go/api-list.go +++ b/vendor/github.com/minio/minio-go/api-list.go @@ -1,5 +1,5 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -34,13 +34,8 @@ import ( // } // func (c Client) ListBuckets() ([]BucketInfo, error) { - // Instantiate a new request. - req, err := c.newRequest("GET", requestMetadata{}) - if err != nil { - return nil, err - } - // Initiate the request. - resp, err := c.do(req) + // Execute GET on service. + resp, err := c.executeMethod("GET", requestMetadata{contentSHA256Bytes: emptySHA256}) defer closeResponse(resp) if err != nil { return nil, err @@ -58,6 +53,187 @@ func (c Client) ListBuckets() ([]BucketInfo, error) { return listAllMyBucketsResult.Buckets.Bucket, nil } +/// Bucket Read Operations. + +// ListObjectsV2 lists all objects matching the objectPrefix from +// the specified bucket. If recursion is enabled it would list +// all subdirectories and all its contents. +// +// Your input parameters are just bucketName, objectPrefix, recursive +// and a done channel for pro-actively closing the internal go +// routine. If you enable recursive as 'true' this function will +// return back all the objects in a given bucket name and object +// prefix. +// +// api := client.New(....) +// // Create a done channel. +// doneCh := make(chan struct{}) +// defer close(doneCh) +// // Recurively list all objects in 'mytestbucket' +// recursive := true +// for message := range api.ListObjectsV2("mytestbucket", "starthere", recursive, doneCh) { +// fmt.Println(message) +// } +// +func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo { + // Allocate new list objects channel. + objectStatCh := make(chan ObjectInfo, 1) + // Default listing is delimited at "/" + delimiter := "/" + if recursive { + // If recursive we do not delimit. + delimiter = "" + } + // Return object owner information by default + fetchOwner := true + // Validate bucket name. 
+ if err := isValidBucketName(bucketName); err != nil { + defer close(objectStatCh) + objectStatCh <- ObjectInfo{ + Err: err, + } + return objectStatCh + } + // Validate incoming object prefix. + if err := isValidObjectPrefix(objectPrefix); err != nil { + defer close(objectStatCh) + objectStatCh <- ObjectInfo{ + Err: err, + } + return objectStatCh + } + + // Initiate list objects goroutine here. + go func(objectStatCh chan<- ObjectInfo) { + defer close(objectStatCh) + // Save continuationToken for next request. + var continuationToken string + for { + // Get list of objects a maximum of 1000 per request. + result, err := c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, 1000) + if err != nil { + objectStatCh <- ObjectInfo{ + Err: err, + } + return + } + + // If contents are available loop through and send over channel. + for _, object := range result.Contents { + // Save the marker. + select { + // Send object content. + case objectStatCh <- object: + // If receives done from the caller, return here. + case <-doneCh: + return + } + } + + // Send all common prefixes if any. + // NOTE: prefixes are only present if the request is delimited. + for _, obj := range result.CommonPrefixes { + object := ObjectInfo{} + object.Key = obj.Prefix + object.Size = 0 + select { + // Send object prefixes. + case objectStatCh <- object: + // If receives done from the caller, return here. + case <-doneCh: + return + } + } + + // If continuation token present, save it for next request. + if result.NextContinuationToken != "" { + continuationToken = result.NextContinuationToken + } + + // Listing ends result is not truncated, return right here. + if !result.IsTruncated { + return + } + } + }(objectStatCh) + return objectStatCh +} + +// listObjectsV2Query - (List Objects V2) - List some or all (up to 1000) of the objects in a bucket. +// +// You can use the request parameters as selection criteria to return a subset of the objects in a bucket. 
+// request parameters :- +// --------- +// ?continuation-token - Specifies the key to start with when listing objects in a bucket. +// ?delimiter - A delimiter is a character you use to group keys. +// ?prefix - Limits the response to keys that begin with the specified prefix. +// ?max-keys - Sets the maximum number of keys returned in the response body. +func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int) (ListBucketV2Result, error) { + // Validate bucket name. + if err := isValidBucketName(bucketName); err != nil { + return ListBucketV2Result{}, err + } + // Validate object prefix. + if err := isValidObjectPrefix(objectPrefix); err != nil { + return ListBucketV2Result{}, err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + + // Always set list-type in ListObjects V2 + urlValues.Set("list-type", "2") + + // Set object prefix. + if objectPrefix != "" { + urlValues.Set("prefix", objectPrefix) + } + // Set continuation token + if continuationToken != "" { + urlValues.Set("continuation-token", continuationToken) + } + // Set delimiter. + if delimiter != "" { + urlValues.Set("delimiter", delimiter) + } + + // Fetch owner when listing + if fetchOwner { + urlValues.Set("fetch-owner", "true") + } + + // maxkeys should default to 1000 or less. + if maxkeys == 0 || maxkeys > 1000 { + maxkeys = 1000 + } + // Set max keys. + urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) + + // Execute GET on bucket to list objects. 
+ resp, err := c.executeMethod("GET", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Bytes: emptySHA256, + }) + defer closeResponse(resp) + if err != nil { + return ListBucketV2Result{}, err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return ListBucketV2Result{}, httpRespToErrorResponse(resp, bucketName, "") + } + } + + // Decode listBuckets XML. + listBucketResult := ListBucketV2Result{} + err = xmlDecoder(resp.Body, &listBucketResult) + if err != nil { + return listBucketResult, err + } + return listBucketResult, nil +} + // ListObjects - (List Objects) - List some objects or all recursively. // // ListObjects lists all objects matching the objectPrefix from @@ -82,7 +258,7 @@ func (c Client) ListBuckets() ([]BucketInfo, error) { // func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, doneCh <-chan struct{}) <-chan ObjectInfo { // Allocate new list objects channel. - objectStatCh := make(chan ObjectInfo, 1000) + objectStatCh := make(chan ObjectInfo, 1) // Default listing is delimited at "/" delimiter := "/" if recursive { @@ -163,8 +339,6 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don return objectStatCh } -/// Bucket Read Operations. - // listObjects - (List Objects) - List some or all (up to 1000) of the objects in a bucket. // // You can use the request parameters as selection criteria to return a subset of the objects in a bucket. @@ -174,14 +348,14 @@ func (c Client) ListObjects(bucketName, objectPrefix string, recursive bool, don // ?delimiter - A delimiter is a character you use to group keys. // ?prefix - Limits the response to keys that begin with the specified prefix. // ?max-keys - Sets the maximum number of keys returned in the response body. 
-func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (listBucketResult, error) { +func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimiter string, maxkeys int) (ListBucketResult, error) { // Validate bucket name. if err := isValidBucketName(bucketName); err != nil { - return listBucketResult{}, err + return ListBucketResult{}, err } // Validate object prefix. if err := isValidObjectPrefix(objectPrefix); err != nil { - return listBucketResult{}, err + return ListBucketResult{}, err } // Get resources properly escaped and lined up before // using them in http request. @@ -206,27 +380,23 @@ func (c Client) listObjectsQuery(bucketName, objectPrefix, objectMarker, delimit // Set max keys. urlValues.Set("max-keys", fmt.Sprintf("%d", maxkeys)) - // Initialize a new request. - req, err := c.newRequest("GET", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, + // Execute GET on bucket to list objects. + resp, err := c.executeMethod("GET", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Bytes: emptySHA256, }) - if err != nil { - return listBucketResult{}, err - } - // Execute list buckets. - resp, err := c.do(req) defer closeResponse(resp) if err != nil { - return listBucketResult{}, err + return ListBucketResult{}, err } if resp != nil { if resp.StatusCode != http.StatusOK { - return listBucketResult{}, httpRespToErrorResponse(resp, bucketName, "") + return ListBucketResult{}, httpRespToErrorResponse(resp, bucketName, "") } } // Decode listBuckets XML. - listBucketResult := listBucketResult{} + listBucketResult := ListBucketResult{} err = xmlDecoder(resp.Body, &listBucketResult) if err != nil { return listBucketResult, err @@ -264,7 +434,7 @@ func (c Client) ListIncompleteUploads(bucketName, objectPrefix string, recursive // listIncompleteUploads lists all incomplete uploads. 
func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive, aggregateSize bool, doneCh <-chan struct{}) <-chan ObjectMultipartInfo { // Allocate channel for multipart uploads. - objectMultipartStatCh := make(chan ObjectMultipartInfo, 1000) + objectMultipartStatCh := make(chan ObjectMultipartInfo, 1) // Delimiter is set to "/" by default. delimiter := "/" if recursive { @@ -314,6 +484,7 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive objectMultipartStatCh <- ObjectMultipartInfo{ Err: err, } + continue } } select { @@ -359,7 +530,7 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive // ?delimiter - A delimiter is a character you use to group keys. // ?prefix - Limits the response to keys that begin with the specified prefix. // ?max-uploads - Sets the maximum number of multipart uploads returned in the response body. -func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (listMultipartUploadsResult, error) { +func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker, prefix, delimiter string, maxUploads int) (ListMultipartUploadsResult, error) { // Get resources properly escaped and lined up before using them in http request. urlValues := make(url.Values) // Set uploads. @@ -388,27 +559,23 @@ func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker, // Set max-uploads. urlValues.Set("max-uploads", fmt.Sprintf("%d", maxUploads)) - // Instantiate a new request. - req, err := c.newRequest("GET", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, + // Execute GET on bucketName to list multipart uploads. + resp, err := c.executeMethod("GET", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Bytes: emptySHA256, }) - if err != nil { - return listMultipartUploadsResult{}, err - } - // Execute list multipart uploads request. 
- resp, err := c.do(req) defer closeResponse(resp) if err != nil { - return listMultipartUploadsResult{}, err + return ListMultipartUploadsResult{}, err } if resp != nil { if resp.StatusCode != http.StatusOK { - return listMultipartUploadsResult{}, httpRespToErrorResponse(resp, bucketName, "") + return ListMultipartUploadsResult{}, httpRespToErrorResponse(resp, bucketName, "") } } // Decode response body. - listMultipartUploadsResult := listMultipartUploadsResult{} + listMultipartUploadsResult := ListMultipartUploadsResult{} err = xmlDecoder(resp.Body, &listMultipartUploadsResult) if err != nil { return listMultipartUploadsResult, err @@ -417,10 +584,10 @@ func (c Client) listMultipartUploadsQuery(bucketName, keyMarker, uploadIDMarker, } // listObjectParts list all object parts recursively. -func (c Client) listObjectParts(bucketName, objectName, uploadID string) (partsInfo map[int]objectPart, err error) { +func (c Client) listObjectParts(bucketName, objectName, uploadID string) (partsInfo map[int]ObjectPart, err error) { // Part number marker for the next batch of request. var nextPartNumberMarker int - partsInfo = make(map[int]objectPart) + partsInfo = make(map[int]ObjectPart) for { // Get list of uploaded parts a maximum of 1000 per request. listObjPartsResult, err := c.listObjectPartsQuery(bucketName, objectName, uploadID, nextPartNumberMarker, 1000) @@ -495,7 +662,7 @@ func (c Client) getTotalMultipartSize(bucketName, objectName, uploadID string) ( // ?part-number-marker - Specifies the part after which listing should // begin. // ?max-parts - Maximum parts to be listed per request. -func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (listObjectPartsResult, error) { +func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, partNumberMarker, maxParts int) (ListObjectPartsResult, error) { // Get resources properly escaped and lined up before using them in http request. 
urlValues := make(url.Values) // Set part number marker. @@ -510,27 +677,24 @@ func (c Client) listObjectPartsQuery(bucketName, objectName, uploadID string, pa // Set max parts. urlValues.Set("max-parts", fmt.Sprintf("%d", maxParts)) - req, err := c.newRequest("GET", requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, + // Execute GET on objectName to get list of parts. + resp, err := c.executeMethod("GET", requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentSHA256Bytes: emptySHA256, }) - if err != nil { - return listObjectPartsResult{}, err - } - // Exectue list object parts. - resp, err := c.do(req) defer closeResponse(resp) if err != nil { - return listObjectPartsResult{}, err + return ListObjectPartsResult{}, err } if resp != nil { if resp.StatusCode != http.StatusOK { - return listObjectPartsResult{}, httpRespToErrorResponse(resp, bucketName, objectName) + return ListObjectPartsResult{}, httpRespToErrorResponse(resp, bucketName, objectName) } } // Decode list object parts XML. - listObjectPartsResult := listObjectPartsResult{} + listObjectPartsResult := ListObjectPartsResult{} err = xmlDecoder(resp.Body, &listObjectPartsResult) if err != nil { return listObjectPartsResult, err diff --git a/vendor/github.com/minio/minio-go/api-notification.go b/vendor/github.com/minio/minio-go/api-notification.go new file mode 100644 index 000000000..cbea1c6da --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-notification.go @@ -0,0 +1,225 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "bufio" + "encoding/json" + "io" + "net/http" + "net/url" + "time" + + "github.com/minio/minio-go/pkg/s3utils" +) + +// GetBucketNotification - get bucket notification at a given path. +func (c Client) GetBucketNotification(bucketName string) (bucketNotification BucketNotification, err error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return BucketNotification{}, err + } + notification, err := c.getBucketNotification(bucketName) + if err != nil { + return BucketNotification{}, err + } + return notification, nil +} + +// Request server for notification rules. +func (c Client) getBucketNotification(bucketName string) (BucketNotification, error) { + urlValues := make(url.Values) + urlValues.Set("notification", "") + + // Execute GET on bucket to list objects. + resp, err := c.executeMethod("GET", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Bytes: emptySHA256, + }) + + defer closeResponse(resp) + if err != nil { + return BucketNotification{}, err + } + return processBucketNotificationResponse(bucketName, resp) + +} + +// processes the GetNotification http response from the server. 
+func processBucketNotificationResponse(bucketName string, resp *http.Response) (BucketNotification, error) { + if resp.StatusCode != http.StatusOK { + errResponse := httpRespToErrorResponse(resp, bucketName, "") + return BucketNotification{}, errResponse + } + var bucketNotification BucketNotification + err := xmlDecoder(resp.Body, &bucketNotification) + if err != nil { + return BucketNotification{}, err + } + return bucketNotification, nil +} + +// Indentity represents the user id, this is a compliance field. +type identity struct { + PrincipalID string `json:"principalId"` +} + +// Notification event bucket metadata. +type bucketMeta struct { + Name string `json:"name"` + OwnerIdentity identity `json:"ownerIdentity"` + ARN string `json:"arn"` +} + +// Notification event object metadata. +type objectMeta struct { + Key string `json:"key"` + Size int64 `json:"size,omitempty"` + ETag string `json:"eTag,omitempty"` + VersionID string `json:"versionId,omitempty"` + Sequencer string `json:"sequencer"` +} + +// Notification event server specific metadata. +type eventMeta struct { + SchemaVersion string `json:"s3SchemaVersion"` + ConfigurationID string `json:"configurationId"` + Bucket bucketMeta `json:"bucket"` + Object objectMeta `json:"object"` +} + +// sourceInfo represents information on the client that +// triggered the event notification. +type sourceInfo struct { + Host string `json:"host"` + Port string `json:"port"` + UserAgent string `json:"userAgent"` +} + +// NotificationEvent represents an Amazon an S3 bucket notification event. 
+type NotificationEvent struct { + EventVersion string `json:"eventVersion"` + EventSource string `json:"eventSource"` + AwsRegion string `json:"awsRegion"` + EventTime string `json:"eventTime"` + EventName string `json:"eventName"` + UserIdentity identity `json:"userIdentity"` + RequestParameters map[string]string `json:"requestParameters"` + ResponseElements map[string]string `json:"responseElements"` + S3 eventMeta `json:"s3"` + Source sourceInfo `json:"source"` +} + +// NotificationInfo - represents the collection of notification events, additionally +// also reports errors if any while listening on bucket notifications. +type NotificationInfo struct { + Records []NotificationEvent + Err error +} + +// ListenBucketNotification - listen on bucket notifications. +func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, events []string, doneCh <-chan struct{}) <-chan NotificationInfo { + notificationInfoCh := make(chan NotificationInfo, 1) + // Only success, start a routine to start reading line by line. + go func(notificationInfoCh chan<- NotificationInfo) { + defer close(notificationInfoCh) + + // Validate the bucket name. + if err := isValidBucketName(bucketName); err != nil { + notificationInfoCh <- NotificationInfo{ + Err: err, + } + return + } + + // Check ARN partition to verify if listening bucket is supported + if s3utils.IsAmazonEndpoint(c.endpointURL) || s3utils.IsGoogleEndpoint(c.endpointURL) { + notificationInfoCh <- NotificationInfo{ + Err: ErrAPINotSupported("Listening bucket notification is specific only to `minio` partitions"), + } + return + } + + // Continously run and listen on bucket notification. + // Create a done channel to control 'ListObjects' go routine. + retryDoneCh := make(chan struct{}, 1) + + // Indicate to our routine to exit cleanly upon return. + defer close(retryDoneCh) + + // Wait on the jitter retry loop. 
+ for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter, retryDoneCh) { + urlValues := make(url.Values) + urlValues.Set("prefix", prefix) + urlValues.Set("suffix", suffix) + urlValues["events"] = events + + // Execute GET on bucket to list objects. + resp, err := c.executeMethod("GET", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Bytes: emptySHA256, + }) + if err != nil { + continue + } + + // Validate http response, upon error return quickly. + if resp.StatusCode != http.StatusOK { + errResponse := httpRespToErrorResponse(resp, bucketName, "") + notificationInfoCh <- NotificationInfo{ + Err: errResponse, + } + return + } + + // Initialize a new bufio scanner, to read line by line. + bio := bufio.NewScanner(resp.Body) + + // Close the response body. + defer resp.Body.Close() + + // Unmarshal each line, returns marshalled values. + for bio.Scan() { + var notificationInfo NotificationInfo + if err = json.Unmarshal(bio.Bytes(), ¬ificationInfo); err != nil { + continue + } + // Send notifications on channel only if there are events received. + if len(notificationInfo.Records) > 0 { + select { + case notificationInfoCh <- notificationInfo: + case <-doneCh: + return + } + } + } + // Look for any underlying errors. + if err = bio.Err(); err != nil { + // For an unexpected connection drop from server, we close the body + // and re-connect. + if err == io.ErrUnexpectedEOF { + resp.Body.Close() + } + } + } + }(notificationInfoCh) + + // Returns the notification info channel, for caller to start reading from. + return notificationInfoCh +} diff --git a/vendor/github.com/minio/minio-go/api-presigned.go b/vendor/github.com/minio/minio-go/api-presigned.go index 1a2a34752..f9d05ab9b 100644 --- a/vendor/github.com/minio/minio-go/api-presigned.go +++ b/vendor/github.com/minio/minio-go/api-presigned.go @@ -1,5 +1,5 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. 
+ * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,32 +20,36 @@ import ( "errors" "net/url" "time" + + "github.com/minio/minio-go/pkg/s3signer" + "github.com/minio/minio-go/pkg/s3utils" ) -// supportedGetReqParams - supported request parameters for GET -// presigned request. +// supportedGetReqParams - supported request parameters for GET presigned request. var supportedGetReqParams = map[string]struct{}{ - "response-expires": struct{}{}, - "response-content-type": struct{}{}, - "response-cache-control": struct{}{}, - "response-content-disposition": struct{}{}, + "response-expires": {}, + "response-content-type": {}, + "response-cache-control": {}, + "response-content-language": {}, + "response-content-encoding": {}, + "response-content-disposition": {}, } // presignURL - Returns a presigned URL for an input 'method'. // Expires maximum is 7days - ie. 604800 and minimum is 1. -func (c Client) presignURL(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (urlStr string, err error) { +func (c Client) presignURL(method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { // Input validation. if method == "" { - return "", ErrInvalidArgument("method cannot be empty.") + return nil, ErrInvalidArgument("method cannot be empty.") } if err := isValidBucketName(bucketName); err != nil { - return "", err + return nil, err } if err := isValidObjectName(objectName); err != nil { - return "", err + return nil, err } if err := isValidExpiry(expires); err != nil { - return "", err + return nil, err } // Convert expires into seconds. @@ -63,11 +67,10 @@ func (c Client) presignURL(method string, bucketName string, objectName string, // Verify if input map has unsupported params, if yes exit. 
for k := range reqParams { if _, ok := supportedGetReqParams[k]; !ok { - return "", ErrInvalidArgument(k + " unsupported request parameter for presigned GET.") + return nil, ErrInvalidArgument(k + " unsupported request parameter for presigned GET.") } } - // Save the request parameters to be used in presigning for - // GET request. + // Save the request parameters to be used in presigning for GET request. reqMetadata.queryValues = reqParams } @@ -75,43 +78,48 @@ func (c Client) presignURL(method string, bucketName string, objectName string, // Since expires is set newRequest will presign the request. req, err := c.newRequest(method, reqMetadata) if err != nil { - return "", err + return nil, err } - return req.URL.String(), nil + return req.URL, nil } // PresignedGetObject - Returns a presigned URL to access an object // without credentials. Expires maximum is 7days - ie. 604800 and // minimum is 1. Additionally you can override a set of response // headers using the query parameters. -func (c Client) PresignedGetObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (url string, err error) { +func (c Client) PresignedGetObject(bucketName string, objectName string, expires time.Duration, reqParams url.Values) (u *url.URL, err error) { return c.presignURL("GET", bucketName, objectName, expires, reqParams) } // PresignedPutObject - Returns a presigned URL to upload an object without credentials. // Expires maximum is 7days - ie. 604800 and minimum is 1. -func (c Client) PresignedPutObject(bucketName string, objectName string, expires time.Duration) (url string, err error) { +func (c Client) PresignedPutObject(bucketName string, objectName string, expires time.Duration) (u *url.URL, err error) { return c.presignURL("PUT", bucketName, objectName, expires, nil) } -// PresignedPostPolicy - Returns POST form data to upload an object at a location. 
-func (c Client) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) { +// PresignedPostPolicy - Returns POST urlString, form data to upload an object. +func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[string]string, err error) { // Validate input arguments. if p.expiration.IsZero() { - return nil, errors.New("Expiration time must be specified") + return nil, nil, errors.New("Expiration time must be specified") } if _, ok := p.formData["key"]; !ok { - return nil, errors.New("object key must be specified") + return nil, nil, errors.New("object key must be specified") } if _, ok := p.formData["bucket"]; !ok { - return nil, errors.New("bucket name must be specified") + return nil, nil, errors.New("bucket name must be specified") } bucketName := p.formData["bucket"] // Fetch the bucket location. location, err := c.getBucketLocation(bucketName) if err != nil { - return nil, err + return nil, nil, err + } + + u, err = c.makeTargetURL(bucketName, "", location, nil) + if err != nil { + return nil, nil, err } // Keep time. @@ -121,15 +129,15 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) { policyBase64 := p.base64() p.formData["policy"] = policyBase64 // For Google endpoint set this value to be 'GoogleAccessId'. - if isGoogleEndpoint(c.endpointURL) { + if s3utils.IsGoogleEndpoint(c.endpointURL) { p.formData["GoogleAccessId"] = c.accessKeyID } else { // For all other endpoints set this value to be 'AWSAccessKeyId'. p.formData["AWSAccessKeyId"] = c.accessKeyID } // Sign the policy. - p.formData["signature"] = postPresignSignatureV2(policyBase64, c.secretAccessKey) - return p.formData, nil + p.formData["signature"] = s3signer.PostPresignSignatureV2(policyBase64, c.secretAccessKey) + return u, p.formData, nil } // Add date policy. 
@@ -138,7 +146,7 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) { condition: "$x-amz-date", value: t.Format(iso8601DateFormat), }); err != nil { - return nil, err + return nil, nil, err } // Add algorithm policy. @@ -147,17 +155,17 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) { condition: "$x-amz-algorithm", value: signV4Algorithm, }); err != nil { - return nil, err + return nil, nil, err } // Add a credential policy. - credential := getCredential(c.accessKeyID, location, t) + credential := s3signer.GetCredential(c.accessKeyID, location, t) if err = p.addNewPolicy(policyCondition{ matchType: "eq", condition: "$x-amz-credential", value: credential, }); err != nil { - return nil, err + return nil, nil, err } // Get base64 encoded policy. @@ -167,6 +175,6 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (map[string]string, error) { p.formData["x-amz-algorithm"] = signV4Algorithm p.formData["x-amz-credential"] = credential p.formData["x-amz-date"] = t.Format(iso8601DateFormat) - p.formData["x-amz-signature"] = postPresignSignatureV4(policyBase64, t, c.secretAccessKey, location) - return p.formData, nil + p.formData["x-amz-signature"] = s3signer.PostPresignSignatureV4(policyBase64, t, c.secretAccessKey, location) + return u, p.formData, nil } diff --git a/vendor/github.com/minio/minio-go/api-put-bucket.go b/vendor/github.com/minio/minio-go/api-put-bucket.go index 986099e34..001da6de3 100644 --- a/vendor/github.com/minio/minio-go/api-put-bucket.go +++ b/vendor/github.com/minio/minio-go/api-put-bucket.go @@ -1,5 +1,5 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -18,87 +18,113 @@ package minio import ( "bytes" + "encoding/base64" "encoding/hex" + "encoding/json" "encoding/xml" + "fmt" "io/ioutil" "net/http" "net/url" + "path" + + "github.com/minio/minio-go/pkg/policy" + "github.com/minio/minio-go/pkg/s3signer" ) /// Bucket operations -// MakeBucket makes a new bucket. -// -// Optional arguments are acl and location - by default all buckets are created -// with ``private`` acl and in US Standard region. -// -// ACL valid values - http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html +// MakeBucket creates a new bucket with bucketName. // -// private - owner gets full access [default]. -// public-read - owner gets full access, all others get read access. -// public-read-write - owner gets full access, all others get full access too. -// authenticated-read - owner gets full access, authenticated users get read access. +// Location is an optional argument, by default all buckets are +// created in US Standard Region. // // For Amazon S3 for more supported regions - http://docs.aws.amazon.com/general/latest/gr/rande.html // For Google Cloud Storage for more supported regions - https://cloud.google.com/storage/docs/bucket-locations -func (c Client) MakeBucket(bucketName string, acl BucketACL, location string) error { +func (c Client) MakeBucket(bucketName string, location string) (err error) { + defer func() { + // Save the location into cache on a successful makeBucket response. + if err == nil { + c.bucketLocCache.Set(bucketName, location) + } + }() + // Validate the input arguments. if err := isValidBucketName(bucketName); err != nil { return err } - if !acl.isValidBucketACL() { - return ErrInvalidArgument("Unrecognized ACL " + acl.String()) - } // If location is empty, treat is a default region 'us-east-1'. if location == "" { location = "us-east-1" } - // Instantiate the request. 
- req, err := c.makeBucketRequest(bucketName, acl, location) - if err != nil { - return err - } + // Try creating bucket with the provided region, in case of + // invalid region error let's guess the appropriate region + // from S3 API headers - // Execute the request. - resp, err := c.do(req) - defer closeResponse(resp) - if err != nil { - return err - } + // Create a done channel to control 'newRetryTimer' go routine. + doneCh := make(chan struct{}, 1) + + // Indicate to our routine to exit cleanly upon return. + defer close(doneCh) + + // Blank indentifier is kept here on purpose since 'range' without + // blank identifiers is only supported since go1.4 + // https://golang.org/doc/go1.4#forrange. + for _ = range c.newRetryTimer(MaxRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) { + // Initialize the makeBucket request. + req, err := c.makeBucketRequest(bucketName, location) + if err != nil { + return err + } + + // Execute make bucket request. + resp, err := c.do(req) + defer closeResponse(resp) + if err != nil { + return err + } - if resp != nil { if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp, bucketName, "") + err := httpRespToErrorResponse(resp, bucketName, "") + errResp := ToErrorResponse(err) + if errResp.Code == "InvalidRegion" && errResp.Region != "" { + // Fetch bucket region found in headers + // of S3 error response, attempt bucket + // create again. + location = errResp.Region + continue + } + // Nothing to retry, fail. + return err } - } - // Save the location into cache on a successfull makeBucket response. - c.bucketLocCache.Set(bucketName, location) + // Control reaches here when bucket create was successful, + // break out. + break + } - // Return. + // Success. return nil } -// makeBucketRequest constructs request for makeBucket. -func (c Client) makeBucketRequest(bucketName string, acl BucketACL, location string) (*http.Request, error) { +// Low level wrapper API For makeBucketRequest. 
+func (c Client) makeBucketRequest(bucketName string, location string) (*http.Request, error) { // Validate input arguments. if err := isValidBucketName(bucketName); err != nil { return nil, err } - if !acl.isValidBucketACL() { - return nil, ErrInvalidArgument("Unrecognized ACL " + acl.String()) - } - // In case of Amazon S3. The make bucket issued on already - // existing bucket would fail with 'AuthorizationMalformed' error - // if virtual style is used. So we default to 'path style' as that - // is the preferred method here. The final location of the - // 'bucket' is provided through XML LocationConstraint data with - // the request. - targetURL := *c.endpointURL - targetURL.Path = "/" + bucketName + "/" + // In case of Amazon S3. The make bucket issued on + // already existing bucket would fail with + // 'AuthorizationMalformed' error if virtual style is + // used. So we default to 'path style' as that is the + // preferred method here. The final location of the + // 'bucket' is provided through XML LocationConstraint + // data with the request. + targetURL := c.endpointURL + targetURL.Path = path.Join(bucketName, "") + "/" // get a new HTTP request for the method. req, err := http.NewRequest("PUT", targetURL.String(), nil) @@ -106,16 +132,11 @@ func (c Client) makeBucketRequest(bucketName string, acl BucketACL, location str return nil, err } - // by default bucket acl is set to private. - req.Header.Set("x-amz-acl", "private") - if acl != "" { - req.Header.Set("x-amz-acl", string(acl)) - } - // set UserAgent for the request. c.setUserAgent(req) - // set sha256 sum for signature calculation only with signature version '4'. + // set sha256 sum for signature calculation only with + // signature version '4'. 
if c.signature.isV4() { req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{}))) } @@ -131,9 +152,12 @@ func (c Client) makeBucketRequest(bucketName string, acl BucketACL, location str } createBucketConfigBuffer := bytes.NewBuffer(createBucketConfigBytes) req.Body = ioutil.NopCloser(createBucketConfigBuffer) - req.ContentLength = int64(createBucketConfigBuffer.Len()) + req.ContentLength = int64(len(createBucketConfigBytes)) + // Set content-md5. + req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(sumMD5(createBucketConfigBytes))) if c.signature.isV4() { - req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256(createBucketConfigBuffer.Bytes()))) + // Set sha256. + req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256(createBucketConfigBytes))) } } @@ -141,69 +165,166 @@ func (c Client) makeBucketRequest(bucketName string, acl BucketACL, location str if c.signature.isV4() { // Signature calculated for MakeBucket request should be for 'us-east-1', // regardless of the bucket's location constraint. - req = signV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1") + req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1") } else if c.signature.isV2() { - req = signV2(*req, c.accessKeyID, c.secretAccessKey) + req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey) } // Return signed request. return req, nil } -// SetBucketACL set the permissions on an existing bucket using access control lists (ACL). +// SetBucketPolicy set the access permissions on an existing bucket. // // For example // -// private - owner gets full access [default]. -// public-read - owner gets full access, all others get read access. -// public-read-write - owner gets full access, all others get full access too. -// authenticated-read - owner gets full access, authenticated users get read access. -func (c Client) SetBucketACL(bucketName string, acl BucketACL) error { +// none - owner gets full access [default]. 
+// readonly - anonymous get access for everyone at a given object prefix. +// readwrite - anonymous list/put/delete access to a given object prefix. +// writeonly - anonymous put/delete access to a given object prefix. +func (c Client) SetBucketPolicy(bucketName string, objectPrefix string, bucketPolicy policy.BucketPolicy) error { // Input validation. if err := isValidBucketName(bucketName); err != nil { return err } - if !acl.isValidBucketACL() { - return ErrInvalidArgument("Unrecognized ACL " + acl.String()) + if err := isValidObjectPrefix(objectPrefix); err != nil { + return err + } + + if !bucketPolicy.IsValidBucketPolicy() { + return ErrInvalidArgument(fmt.Sprintf("Invalid bucket policy provided. %s", bucketPolicy)) + } + + policyInfo, err := c.getBucketPolicy(bucketName) + errResponse := ToErrorResponse(err) + if err != nil && errResponse.Code != "NoSuchBucketPolicy" { + return err + } + + if bucketPolicy == policy.BucketPolicyNone && policyInfo.Statements == nil { + // As the request is for removing policy and the bucket + // has empty policy statements, just return success. + return nil + } + + policyInfo.Statements = policy.SetPolicy(policyInfo.Statements, bucketPolicy, bucketName, objectPrefix) + + // Save the updated policies. + return c.putBucketPolicy(bucketName, policyInfo) +} + +// Saves a new bucket policy. +func (c Client) putBucketPolicy(bucketName string, policyInfo policy.BucketAccessPolicy) error { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return err } - // Set acl query. + // If there are no policy statements, we should remove entire policy. + if len(policyInfo.Statements) == 0 { + return c.removeBucketPolicy(bucketName) + } + + // Get resources properly escaped and lined up before + // using them in http request. urlValues := make(url.Values) - urlValues.Set("acl", "") + urlValues.Set("policy", "") - // Add misc headers. 
- customHeader := make(http.Header) + policyBytes, err := json.Marshal(&policyInfo) + if err != nil { + return err + } - if acl != "" { - customHeader.Set("x-amz-acl", acl.String()) - } else { - customHeader.Set("x-amz-acl", "private") + policyBuffer := bytes.NewReader(policyBytes) + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: policyBuffer, + contentLength: int64(len(policyBytes)), + contentMD5Bytes: sumMD5(policyBytes), + contentSHA256Bytes: sum256(policyBytes), } - // Instantiate a new request. - req, err := c.newRequest("PUT", requestMetadata{ - bucketName: bucketName, - queryValues: urlValues, - customHeader: customHeader, - }) + // Execute PUT to upload a new bucket policy. + resp, err := c.executeMethod("PUT", reqMetadata) + defer closeResponse(resp) if err != nil { return err } + if resp != nil { + if resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, "") + } + } + return nil +} + +// Removes all policies on a bucket. +func (c Client) removeBucketPolicy(bucketName string) error { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return err + } + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("policy", "") - // Initiate the request. - resp, err := c.do(req) + // Execute DELETE on objectName. + resp, err := c.executeMethod("DELETE", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentSHA256Bytes: emptySHA256, + }) defer closeResponse(resp) if err != nil { return err } + return nil +} + +// SetBucketNotification saves a new bucket notification. +func (c Client) SetBucketNotification(bucketName string, bucketNotification BucketNotification) error { + // Input validation. 
+ if err := isValidBucketName(bucketName); err != nil { + return err + } + + // Get resources properly escaped and lined up before + // using them in http request. + urlValues := make(url.Values) + urlValues.Set("notification", "") + + notifBytes, err := xml.Marshal(bucketNotification) + if err != nil { + return err + } + + notifBuffer := bytes.NewReader(notifBytes) + reqMetadata := requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: notifBuffer, + contentLength: int64(len(notifBytes)), + contentMD5Bytes: sumMD5(notifBytes), + contentSHA256Bytes: sum256(notifBytes), + } + // Execute PUT to upload a new bucket notification. + resp, err := c.executeMethod("PUT", reqMetadata) + defer closeResponse(resp) + if err != nil { + return err + } if resp != nil { - // if error return. if resp.StatusCode != http.StatusOK { return httpRespToErrorResponse(resp, bucketName, "") } } - - // return return nil } + +// RemoveAllBucketNotification - Remove bucket notification clears all previously specified config +func (c Client) RemoveAllBucketNotification(bucketName string) error { + return c.SetBucketNotification(bucketName, BucketNotification{}) +} diff --git a/vendor/github.com/minio/minio-go/api-put-object-common.go b/vendor/github.com/minio/minio-go/api-put-object-common.go index 7c73b24c3..68a459f4a 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-common.go +++ b/vendor/github.com/minio/minio-go/api-put-object-common.go @@ -17,10 +17,10 @@ package minio import ( - "crypto/md5" - "crypto/sha256" + "fmt" "hash" "io" + "io/ioutil" "math" "os" ) @@ -44,18 +44,17 @@ func isReadAt(reader io.Reader) (ok bool) { } // shouldUploadPart - verify if part should be uploaded. -func shouldUploadPart(objPart objectPart, objectParts map[int]objectPart) bool { +func shouldUploadPart(objPart ObjectPart, uploadReq uploadPartReq) bool { // If part not found should upload the part. 
- uploadedPart, found := objectParts[objPart.PartNumber] - if !found { + if uploadReq.Part == nil { return true } // if size mismatches should upload the part. - if objPart.Size != uploadedPart.Size { + if objPart.Size != uploadReq.Part.Size { return true } // if md5sum mismatches should upload the part. - if objPart.ETag != uploadedPart.ETag { + if objPart.ETag != uploadReq.Part.ETag { return true } return false @@ -68,7 +67,7 @@ func shouldUploadPart(objPart objectPart, objectParts map[int]objectPart) bool { // object storage it will have the following parameters as constants. // // maxPartsCount - 10000 -// minPartSize - 5MiB +// minPartSize - 64MiB // maxMultipartPutObjectSize - 5TiB // func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) { @@ -94,102 +93,60 @@ func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, las return totalPartsCount, partSize, lastPartSize, nil } -// Compatibility code for Golang < 1.5.x. -// copyBuffer is identical to io.CopyBuffer, since such a function is -// not available/implemented in Golang version < 1.5.x, we use a -// custom call exactly implementng io.CopyBuffer from Golang > 1.5.x -// version does. +// hashCopyBuffer is identical to hashCopyN except that it doesn't take +// any size argument but takes a buffer argument and reader should be +// of io.ReaderAt interface. // -// copyBuffer stages through the provided buffer (if one is required) -// rather than allocating a temporary one. If buf is nil, one is -// allocated; otherwise if it has zero length, copyBuffer panics. -// -// FIXME: Remove this code when distributions move to newer Golang versions. -func copyBuffer(writer io.Writer, reader io.Reader, buf []byte) (written int64, err error) { - // If the reader has a WriteTo method, use it to do the copy. - // Avoids an allocation and a copy. 
- if wt, ok := reader.(io.WriterTo); ok { - return wt.WriteTo(writer) - } - // Similarly, if the writer has a ReadFrom method, use it to do - // the copy. - if rt, ok := writer.(io.ReaderFrom); ok { - return rt.ReadFrom(reader) +// Stages reads from offsets into the buffer, if buffer is nil it is +// initialized to optimalBufferSize. +func hashCopyBuffer(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, writer io.Writer, reader io.ReaderAt, buf []byte) (size int64, err error) { + hashWriter := writer + for _, v := range hashAlgorithms { + hashWriter = io.MultiWriter(hashWriter, v) } + + // Buffer is nil, initialize. if buf == nil { - buf = make([]byte, 32*1024) + buf = make([]byte, optimalReadBufferSize) } + + // Offset to start reading from. + var readAtOffset int64 + + // Following block reads data at an offset from the input + // reader and copies data to into local temporary file. for { - nr, er := reader.Read(buf) - if nr > 0 { - nw, ew := writer.Write(buf[0:nr]) - if nw > 0 { - written += int64(nw) - } - if ew != nil { - err = ew - break - } - if nr != nw { - err = io.ErrShortWrite - break + readAtSize, rerr := reader.ReadAt(buf, readAtOffset) + if rerr != nil { + if rerr != io.EOF { + return 0, rerr } } - if er == io.EOF { - break + writeSize, werr := hashWriter.Write(buf[:readAtSize]) + if werr != nil { + return 0, werr + } + if readAtSize != writeSize { + return 0, fmt.Errorf("Read size was not completely written to writer. wanted %d, got %d - %s", readAtSize, writeSize, reportIssue) } - if er != nil { - err = er + readAtOffset += int64(writeSize) + size += int64(writeSize) + if rerr == io.EOF { break } } - return written, err -} - -// hashCopyBuffer is identical to hashCopyN except that it stages -// through the provided buffer (if one is required) rather than -// allocating a temporary one. If buf is nil, one is allocated for 5MiB. 
-func (c Client) hashCopyBuffer(writer io.Writer, reader io.Reader, buf []byte) (md5Sum, sha256Sum []byte, size int64, err error) { - // MD5 and SHA256 hasher. - var hashMD5, hashSHA256 hash.Hash - // MD5 and SHA256 hasher. - hashMD5 = md5.New() - hashWriter := io.MultiWriter(writer, hashMD5) - if c.signature.isV4() { - hashSHA256 = sha256.New() - hashWriter = io.MultiWriter(writer, hashMD5, hashSHA256) - } - - // Allocate buf if not initialized. - if buf == nil { - buf = make([]byte, optimalReadBufferSize) - } - - // Using copyBuffer to copy in large buffers, default buffer - // for io.Copy of 32KiB is too small. - size, err = copyBuffer(hashWriter, reader, buf) - if err != nil { - return nil, nil, 0, err - } - // Finalize md5 sum and sha256 sum. - md5Sum = hashMD5.Sum(nil) - if c.signature.isV4() { - sha256Sum = hashSHA256.Sum(nil) + for k, v := range hashAlgorithms { + hashSums[k] = v.Sum(nil) } - return md5Sum, sha256Sum, size, err + return size, err } -// hashCopyN - Calculates Md5sum and SHA256sum for up to partSize amount of bytes. -func (c Client) hashCopyN(writer io.Writer, reader io.Reader, partSize int64) (md5Sum, sha256Sum []byte, size int64, err error) { - // MD5 and SHA256 hasher. - var hashMD5, hashSHA256 hash.Hash - // MD5 and SHA256 hasher. - hashMD5 = md5.New() - hashWriter := io.MultiWriter(writer, hashMD5) - if c.signature.isV4() { - hashSHA256 = sha256.New() - hashWriter = io.MultiWriter(writer, hashMD5, hashSHA256) +// hashCopyN - Calculates chosen hashes up to partSize amount of bytes. +func hashCopyN(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, writer io.Writer, reader io.Reader, partSize int64) (size int64, err error) { + hashWriter := writer + for _, v := range hashAlgorithms { + hashWriter = io.MultiWriter(hashWriter, v) } // Copies to input at writer. @@ -197,96 +154,98 @@ func (c Client) hashCopyN(writer io.Writer, reader io.Reader, partSize int64) (m if err != nil { // If not EOF return error right here. 
if err != io.EOF { - return nil, nil, 0, err + return 0, err } } - // Finalize md5shum and sha256 sum. - md5Sum = hashMD5.Sum(nil) - if c.signature.isV4() { - sha256Sum = hashSHA256.Sum(nil) + for k, v := range hashAlgorithms { + hashSums[k] = v.Sum(nil) } - return md5Sum, sha256Sum, size, err + return size, err } // getUploadID - fetch upload id if already present for an object name // or initiate a new request to fetch a new upload id. -func (c Client) getUploadID(bucketName, objectName, contentType string) (uploadID string, isNew bool, err error) { +func (c Client) newUploadID(bucketName, objectName string, metaData map[string][]string) (uploadID string, err error) { // Input validation. if err := isValidBucketName(bucketName); err != nil { - return "", false, err + return "", err } if err := isValidObjectName(objectName); err != nil { - return "", false, err + return "", err } - // Set content Type to default if empty string. - if contentType == "" { - contentType = "application/octet-stream" + // Initiate multipart upload for an object. + initMultipartUploadResult, err := c.initiateMultipartUpload(bucketName, objectName, metaData) + if err != nil { + return "", err } + return initMultipartUploadResult.UploadID, nil +} + +// getMpartUploadSession returns the upload id and the uploaded parts to continue a previous upload session +// or initiate a new multipart session if no current one found +func (c Client) getMpartUploadSession(bucketName, objectName string, metaData map[string][]string) (string, map[int]ObjectPart, error) { + // A map of all uploaded parts. + var partsInfo map[int]ObjectPart + var err error - // Find upload id for previous upload for an object. - uploadID, err = c.findUploadID(bucketName, objectName) + uploadID, err := c.findUploadID(bucketName, objectName) if err != nil { - return "", false, err + return "", nil, err } + if uploadID == "" { - // Initiate multipart upload for an object. 
- initMultipartUploadResult, err := c.initiateMultipartUpload(bucketName, objectName, contentType) + // Initiates a new multipart request + uploadID, err = c.newUploadID(bucketName, objectName, metaData) + if err != nil { + return "", nil, err + } + } else { + // Fetch previously upload parts and maximum part size. + partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID) if err != nil { - return "", false, err + // When the server returns NoSuchUpload even if its previouls acknowleged the existance of the upload id, + // initiate a new multipart upload + if respErr, ok := err.(ErrorResponse); ok && respErr.Code == "NoSuchUpload" { + uploadID, err = c.newUploadID(bucketName, objectName, metaData) + if err != nil { + return "", nil, err + } + } else { + return "", nil, err + } } - // Save the new upload id. - uploadID = initMultipartUploadResult.UploadID - // Indicate that this is a new upload id. - isNew = true } - return uploadID, isNew, nil + + // Allocate partsInfo if not done yet + if partsInfo == nil { + partsInfo = make(map[int]ObjectPart) + } + + return uploadID, partsInfo, nil } -// computeHashBuffer - Calculates MD5 and SHA256 for an input read -// Seeker is identical to computeHash except that it stages -// through the provided buffer (if one is required) rather than -// allocating a temporary one. If buf is nil, it uses a temporary -// buffer. -func (c Client) computeHashBuffer(reader io.ReadSeeker, buf []byte) (md5Sum, sha256Sum []byte, size int64, err error) { - // MD5 and SHA256 hasher. - var hashMD5, hashSHA256 hash.Hash - // MD5 and SHA256 hasher. - hashMD5 = md5.New() - hashWriter := io.MultiWriter(hashMD5) - if c.signature.isV4() { - hashSHA256 = sha256.New() - hashWriter = io.MultiWriter(hashMD5, hashSHA256) +// computeHash - Calculates hashes for an input read Seeker. 
+func computeHash(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte, reader io.ReadSeeker) (size int64, err error) { + hashWriter := ioutil.Discard + for _, v := range hashAlgorithms { + hashWriter = io.MultiWriter(hashWriter, v) } // If no buffer is provided, no need to allocate just use io.Copy. - if buf == nil { - size, err = io.Copy(hashWriter, reader) - if err != nil { - return nil, nil, 0, err - } - } else { - size, err = copyBuffer(hashWriter, reader, buf) - if err != nil { - return nil, nil, 0, err - } + size, err = io.Copy(hashWriter, reader) + if err != nil { + return 0, err } // Seek back reader to the beginning location. if _, err := reader.Seek(0, 0); err != nil { - return nil, nil, 0, err + return 0, err } - // Finalize md5shum and sha256 sum. - md5Sum = hashMD5.Sum(nil) - if c.signature.isV4() { - sha256Sum = hashSHA256.Sum(nil) + for k, v := range hashAlgorithms { + hashSums[k] = v.Sum(nil) } - return md5Sum, sha256Sum, size, nil -} - -// computeHash - Calculates MD5 and SHA256 for an input read Seeker. -func (c Client) computeHash(reader io.ReadSeeker) (md5Sum, sha256Sum []byte, size int64, err error) { - return c.computeHashBuffer(reader, nil) + return size, nil } diff --git a/vendor/github.com/minio/minio-go/api-put-object-copy.go b/vendor/github.com/minio/minio-go/api-put-object-copy.go new file mode 100644 index 000000000..56978d427 --- /dev/null +++ b/vendor/github.com/minio/minio-go/api-put-object-copy.go @@ -0,0 +1,72 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "net/http" + + "github.com/minio/minio-go/pkg/s3utils" +) + +// CopyObject - copy a source object into a new object with the provided name in the provided bucket +func (c Client) CopyObject(bucketName string, objectName string, objectSource string, cpCond CopyConditions) error { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return err + } + if err := isValidObjectName(objectName); err != nil { + return err + } + if objectSource == "" { + return ErrInvalidArgument("Object source cannot be empty.") + } + + // customHeaders apply headers. + customHeaders := make(http.Header) + for _, cond := range cpCond.conditions { + customHeaders.Set(cond.key, cond.value) + } + + // Set copy source. + customHeaders.Set("x-amz-copy-source", s3utils.EncodePath(objectSource)) + + // Execute PUT on objectName. + resp, err := c.executeMethod("PUT", requestMetadata{ + bucketName: bucketName, + objectName: objectName, + customHeader: customHeaders, + }) + defer closeResponse(resp) + if err != nil { + return err + } + if resp != nil { + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp, bucketName, objectName) + } + } + + // Decode copy response on success. + cpObjRes := copyObjectResult{} + err = xmlDecoder(resp.Body, &cpObjRes) + if err != nil { + return err + } + + // Return nil on success. 
+ return nil +} diff --git a/vendor/github.com/minio/minio-go/api-put-object-file.go b/vendor/github.com/minio/minio-go/api-put-object-file.go index d212dfb87..09fec769d 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-file.go +++ b/vendor/github.com/minio/minio-go/api-put-object-file.go @@ -1,5 +1,5 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,12 +17,19 @@ package minio import ( + "crypto/md5" + "crypto/sha256" "encoding/hex" "fmt" + "hash" "io" "io/ioutil" + "mime" "os" + "path/filepath" "sort" + + "github.com/minio/minio-go/pkg/s3utils" ) // FPutObject - Create an object in a bucket, with contents from file at filePath. @@ -57,9 +64,21 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) return 0, ErrEntityTooLarge(fileSize, maxMultipartPutObjectSize, bucketName, objectName) } + objMetadata := make(map[string][]string) + + // Set contentType based on filepath extension if not given or default + // value of "binary/octet-stream" if the extension has no associated type. + if contentType == "" { + if contentType = mime.TypeByExtension(filepath.Ext(filePath)); contentType == "" { + contentType = "application/octet-stream" + } + } + + objMetadata["Content-Type"] = []string{contentType} + // NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs. // Current implementation will only upload a maximum of 5GiB to Google Cloud Storage servers. 
- if isGoogleEndpoint(c.endpointURL) { + if s3utils.IsGoogleEndpoint(c.endpointURL) { if fileSize > int64(maxSinglePutObjectSize) { return 0, ErrorResponse{ Code: "NotImplemented", @@ -69,30 +88,16 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) } } // Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size. - return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, contentType, nil) - } - - // NOTE: S3 doesn't allow anonymous multipart requests. - if isAmazonEndpoint(c.endpointURL) && c.anonymous { - if fileSize > int64(maxSinglePutObjectSize) { - return 0, ErrorResponse{ - Code: "NotImplemented", - Message: fmt.Sprintf("For anonymous requests Content-Length cannot be %d.", fileSize), - Key: objectName, - BucketName: bucketName, - } - } - // Do not compute MD5 for anonymous requests to Amazon - // S3. Uploads up to 5GiB in size. - return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, contentType, nil) + return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, objMetadata, nil) } // Small object upload is initiated for uploads for input data size smaller than 5MiB. if fileSize < minPartSize && fileSize >= 0 { - return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, contentType, nil) + return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, objMetadata, nil) } + // Upload all large objects as multipart. 
- n, err = c.putObjectMultipartFromFile(bucketName, objectName, fileReader, fileSize, contentType, nil) + n, err = c.putObjectMultipartFromFile(bucketName, objectName, fileReader, fileSize, objMetadata, nil) if err != nil { errResp := ToErrorResponse(err) // Verify if multipart functionality is not available, if not @@ -103,7 +108,7 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) return 0, ErrEntityTooLarge(fileSize, maxSinglePutObjectSize, bucketName, objectName) } // Fall back to uploading as single PutObject operation. - return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, contentType, nil) + return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, objMetadata, nil) } return n, err } @@ -118,7 +123,7 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string) // against MD5SUM of each individual parts. This function also // effectively utilizes file system capabilities of reading from // specific sections and not having to create temporary files. -func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileReader io.ReaderAt, fileSize int64, contentType string, progress io.Reader) (int64, error) { +func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileReader io.ReaderAt, fileSize int64, metaData map[string][]string, progress io.Reader) (int64, error) { // Input validation. if err := isValidBucketName(bucketName); err != nil { return 0, err @@ -127,9 +132,8 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe return 0, err } - // Get upload id for an object, initiates a new multipart request - // if it cannot find any previously partially uploaded object. 
- uploadID, isNew, err := c.getUploadID(bucketName, objectName, contentType) + // Get the upload id of a previously partially uploaded object or initiate a new multipart upload + uploadID, partsInfo, err := c.getMpartUploadSession(bucketName, objectName, metaData) if err != nil { return 0, err } @@ -138,99 +142,149 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe var totalUploadedSize int64 // Complete multipart upload. - var completeMultipartUpload completeMultipartUpload - - // A map of all uploaded parts. - var partsInfo = make(map[int]objectPart) - - // If this session is a continuation of a previous session fetch all - // previously uploaded parts info. - if !isNew { - // Fetch previously upload parts and maximum part size. - partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID) - if err != nil { - return 0, err - } - } + var complMultipartUpload completeMultipartUpload // Calculate the optimal parts info for a given size. - totalPartsCount, partSize, _, err := optimalPartInfo(fileSize) + totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(fileSize) if err != nil { return 0, err } - // Part number always starts with '1'. - partNumber := 1 + // Create a channel to communicate a part was uploaded. + // Buffer this to 10000, the maximum number of parts allowed by S3. + uploadedPartsCh := make(chan uploadedPartRes, 10000) - for partNumber <= totalPartsCount { - // Get a section reader on a particular offset. - sectionReader := io.NewSectionReader(fileReader, totalUploadedSize, partSize) + // Create a channel to communicate which part to upload. + // Buffer this to 10000, the maximum number of parts allowed by S3. + uploadPartsCh := make(chan uploadPartReq, 10000) - // Calculates MD5 and SHA256 sum for a section reader. - var md5Sum, sha256Sum []byte - var prtSize int64 - md5Sum, sha256Sum, prtSize, err = c.computeHash(sectionReader) - if err != nil { - return 0, err - } + // Just for readability. 
+ lastPartNumber := totalPartsCount - var reader io.Reader - // Update progress reader appropriately to the latest offset - // as we read from the source. - reader = newHook(sectionReader, progress) - - // Verify if part should be uploaded. - if shouldUploadPart(objectPart{ - ETag: hex.EncodeToString(md5Sum), - PartNumber: partNumber, - Size: prtSize, - }, partsInfo) { - // Proceed to upload the part. - var objPart objectPart - objPart, err = c.uploadPart(bucketName, objectName, uploadID, ioutil.NopCloser(reader), partNumber, - md5Sum, sha256Sum, prtSize) - if err != nil { - return totalUploadedSize, err - } - // Save successfully uploaded part metadata. - partsInfo[partNumber] = objPart + // Send each part through the partUploadCh to be uploaded. + for p := 1; p <= totalPartsCount; p++ { + part, ok := partsInfo[p] + if ok { + uploadPartsCh <- uploadPartReq{PartNum: p, Part: &part} } else { - // Update the progress reader for the skipped part. - if progress != nil { - if _, err = io.CopyN(ioutil.Discard, progress, prtSize); err != nil { - return totalUploadedSize, err - } - } + uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil} } + } + close(uploadPartsCh) - // Save successfully uploaded size. - totalUploadedSize += prtSize + // Use three 'workers' to upload parts in parallel. + for w := 1; w <= totalWorkers; w++ { + go func() { + // Deal with each part as it comes through the channel. + for uploadReq := range uploadPartsCh { + // Add hash algorithms that need to be calculated by computeHash() + // In case of a non-v4 signature or https connection, sha256 is not needed. + hashAlgos := make(map[string]hash.Hash) + hashSums := make(map[string][]byte) + hashAlgos["md5"] = md5.New() + if c.signature.isV4() && !c.secure { + hashAlgos["sha256"] = sha256.New() + } - // Increment part number. - partNumber++ - } + // If partNumber was not uploaded we calculate the missing + // part offset and size. 
For all other part numbers we + // calculate offset based on multiples of partSize. + readOffset := int64(uploadReq.PartNum-1) * partSize + missingPartSize := partSize - // Verify if we uploaded all data. - if totalUploadedSize != fileSize { - return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, fileSize, bucketName, objectName) + // As a special case if partNumber is lastPartNumber, we + // calculate the offset based on the last part size. + if uploadReq.PartNum == lastPartNumber { + readOffset = (fileSize - lastPartSize) + missingPartSize = lastPartSize + } + + // Get a section reader on a particular offset. + sectionReader := io.NewSectionReader(fileReader, readOffset, missingPartSize) + var prtSize int64 + var err error + + prtSize, err = computeHash(hashAlgos, hashSums, sectionReader) + if err != nil { + uploadedPartsCh <- uploadedPartRes{ + Error: err, + } + // Exit the goroutine. + return + } + + // Create the part to be uploaded. + verifyObjPart := ObjectPart{ + ETag: hex.EncodeToString(hashSums["md5"]), + PartNumber: uploadReq.PartNum, + Size: partSize, + } + + // If this is the last part do not give it the full part size. + if uploadReq.PartNum == lastPartNumber { + verifyObjPart.Size = lastPartSize + } + + // Verify if part should be uploaded. + if shouldUploadPart(verifyObjPart, uploadReq) { + // Proceed to upload the part. + var objPart ObjectPart + objPart, err = c.uploadPart(bucketName, objectName, uploadID, sectionReader, uploadReq.PartNum, hashSums["md5"], hashSums["sha256"], prtSize) + if err != nil { + uploadedPartsCh <- uploadedPartRes{ + Error: err, + } + // Exit the goroutine. + return + } + // Save successfully uploaded part metadata. + uploadReq.Part = &objPart + } + // Return through the channel the part size. 
+ uploadedPartsCh <- uploadedPartRes{ + Size: verifyObjPart.Size, + PartNum: uploadReq.PartNum, + Part: uploadReq.Part, + Error: nil, + } + } + }() } - // Loop over uploaded parts to save them in a Parts array before completing the multipart request. - for _, part := range partsInfo { - var complPart completePart - complPart.ETag = part.ETag - complPart.PartNumber = part.PartNumber - completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, complPart) + // Retrieve each uploaded part once it is done. + for u := 1; u <= totalPartsCount; u++ { + uploadRes := <-uploadedPartsCh + if uploadRes.Error != nil { + return totalUploadedSize, uploadRes.Error + } + // Retrieve each uploaded part and store it to be completed. + part := uploadRes.Part + if part == nil { + return totalUploadedSize, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum)) + } + // Update the total uploaded size. + totalUploadedSize += uploadRes.Size + // Update the progress bar if there is one. + if progress != nil { + if _, err = io.CopyN(ioutil.Discard, progress, uploadRes.Size); err != nil { + return totalUploadedSize, err + } + } + // Store the part to be completed. + complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + }) } - // Verify if totalPartsCount is not equal to total list of parts. - if totalPartsCount != len(completeMultipartUpload.Parts) { - return totalUploadedSize, ErrInvalidParts(partNumber, len(completeMultipartUpload.Parts)) + // Verify if we uploaded all data. + if totalUploadedSize != fileSize { + return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, fileSize, bucketName, objectName) } // Sort all completed parts. 
- sort.Sort(completedParts(completeMultipartUpload.Parts)) - _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, completeMultipartUpload) + sort.Sort(completedParts(complMultipartUpload.Parts)) + _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload) if err != nil { return totalUploadedSize, err } diff --git a/vendor/github.com/minio/minio-go/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/api-put-object-multipart.go index ee0019165..3a299f65b 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-multipart.go +++ b/vendor/github.com/minio/minio-go/api-put-object-multipart.go @@ -1,5 +1,5 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,8 +18,12 @@ package minio import ( "bytes" + "crypto/md5" + "crypto/sha256" "encoding/hex" "encoding/xml" + "fmt" + "hash" "io" "io/ioutil" "net/http" @@ -41,11 +45,11 @@ import ( // If we exhaust all the known types, code proceeds to use stream as // is where each part is re-downloaded, checksummed and verified // before upload. -func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) { - if size > 0 && size >= minPartSize { +func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) { + if size > 0 && size > minPartSize { // Verify if reader is *os.File, then use file system functionalities. 
if isFile(reader) { - return c.putObjectMultipartFromFile(bucketName, objectName, reader.(*os.File), size, contentType, progress) + return c.putObjectMultipartFromFile(bucketName, objectName, reader.(*os.File), size, metaData, progress) } // Verify if reader is *minio.Object or io.ReaderAt. // NOTE: Verification of object is kept for a specific purpose @@ -54,17 +58,21 @@ func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Read // and such a functionality is used in the subsequent code // path. if isObject(reader) || isReadAt(reader) { - return c.putObjectMultipartFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, contentType, progress) + return c.putObjectMultipartFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, metaData, progress) } } // For any other data size and reader type we do generic multipart // approach by staging data in temporary files and uploading them. - return c.putObjectMultipartStream(bucketName, objectName, reader, size, contentType, progress) + return c.putObjectMultipartStream(bucketName, objectName, reader, size, metaData, progress) } -// putObjectStream uploads files bigger than 5MiB, and also supports -// special case where size is unknown i.e '-1'. -func (c Client) putObjectMultipartStream(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) { +// putObjectMultipartStreamNoChecksum - upload a large object using +// multipart upload and streaming signature for signing payload. +// N B We don't resume an incomplete multipart upload, we overwrite +// existing parts of an incomplete upload. +func (c Client) putObjectMultipartStreamNoChecksum(bucketName, objectName string, + reader io.Reader, size int64, metadata map[string][]string, progress io.Reader) (int64, error) { + // Input validation. 
if err := isValidBucketName(bucketName); err != nil { return 0, err @@ -73,30 +81,118 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i return 0, err } + // Get the upload id of a previously partially uploaded object or initiate a new multipart upload + uploadID, err := c.findUploadID(bucketName, objectName) + if err != nil { + return 0, err + } + if uploadID == "" { + // Initiates a new multipart request + uploadID, err = c.newUploadID(bucketName, objectName, metadata) + if err != nil { + return 0, err + } + } + + // Calculate the optimal parts info for a given size. + totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size) + if err != nil { + return 0, err + } + // Total data read and written to server. should be equal to 'size' at the end of the call. var totalUploadedSize int64 + // Initialize parts uploaded map. + partsInfo := make(map[int]ObjectPart) + + // Part number always starts with '1'. + var partNumber int + for partNumber = 1; partNumber <= totalPartsCount; partNumber++ { + // Update progress reader appropriately to the latest offset + // as we read from the source. + hookReader := newHook(reader, progress) + + // Proceed to upload the part. + if partNumber == totalPartsCount { + partSize = lastPartSize + } + + var objPart ObjectPart + objPart, err = c.uploadPart(bucketName, objectName, uploadID, + io.LimitReader(hookReader, partSize), partNumber, nil, nil, partSize) + // For unknown size, Read EOF we break away. + // We do not have to upload till totalPartsCount. + if err == io.EOF && size < 0 { + break + } + + if err != nil { + return totalUploadedSize, err + } + + // Save successfully uploaded part metadata. + partsInfo[partNumber] = objPart + + // Save successfully uploaded size. + totalUploadedSize += partSize + } + + // Verify if we uploaded all the data. 
+ if size > 0 { + if totalUploadedSize != size { + return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) + } + } + // Complete multipart upload. var complMultipartUpload completeMultipartUpload - // A map of all previously uploaded parts. - var partsInfo = make(map[int]objectPart) + // Loop over total uploaded parts to save them in + // Parts array before completing the multipart request. + for i := 1; i < partNumber; i++ { + part, ok := partsInfo[i] + if !ok { + return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i)) + } + complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + }) + } - // getUploadID for an object, initiates a new multipart request - // if it cannot find any previously partially uploaded object. - uploadID, isNew, err := c.getUploadID(bucketName, objectName, contentType) + // Sort all completed parts. + sort.Sort(completedParts(complMultipartUpload.Parts)) + _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload) if err != nil { + return totalUploadedSize, err + } + + // Return final size. + return totalUploadedSize, nil +} + +// putObjectStream uploads files bigger than 64MiB, and also supports +// special case where size is unknown i.e '-1'. +func (c Client) putObjectMultipartStream(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return 0, err + } + if err := isValidObjectName(objectName); err != nil { return 0, err } - // If This session is a continuation of a previous session fetch all - // previously uploaded parts info. - if !isNew { - // Fetch previously uploaded parts and maximum part size. 
- partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID) - if err != nil { - return 0, err - } + // Total data read and written to server. should be equal to 'size' at the end of the call. + var totalUploadedSize int64 + + // Complete multipart upload. + var complMultipartUpload completeMultipartUpload + + // Get the upload id of a previously partially uploaded object or initiate a new multipart upload + uploadID, partsInfo, err := c.getMpartUploadSession(bucketName, objectName, metaData) + if err != nil { + return 0, err } // Calculate the optimal parts info for a given size. @@ -112,13 +208,19 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i tmpBuffer := new(bytes.Buffer) for partNumber <= totalPartsCount { - // Calculates MD5 and SHA256 sum while copying partSize bytes - // into tmpBuffer. - md5Sum, sha256Sum, prtSize, rErr := c.hashCopyN(tmpBuffer, reader, partSize) - if rErr != nil { - if rErr != io.EOF { - return 0, rErr - } + // Choose hash algorithms to be calculated by hashCopyN, avoid sha256 + // with non-v4 signature request or HTTPS connection + hashSums := make(map[string][]byte) + hashAlgos := make(map[string]hash.Hash) + hashAlgos["md5"] = md5.New() + if c.signature.isV4() && !c.secure { + hashAlgos["sha256"] = sha256.New() + } + + // Calculates hash sums while copying partSize bytes into tmpBuffer. + prtSize, rErr := hashCopyN(hashAlgos, hashSums, tmpBuffer, reader, partSize) + if rErr != nil && rErr != io.EOF { + return 0, rErr } var reader io.Reader @@ -126,16 +228,17 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i // as we read from the source. reader = newHook(tmpBuffer, progress) + part, ok := partsInfo[partNumber] + // Verify if part should be uploaded. 
- if shouldUploadPart(objectPart{ - ETag: hex.EncodeToString(md5Sum), + if !ok || shouldUploadPart(ObjectPart{ + ETag: hex.EncodeToString(hashSums["md5"]), PartNumber: partNumber, Size: prtSize, - }, partsInfo) { + }, uploadPartReq{PartNum: partNumber, Part: &part}) { // Proceed to upload the part. - var objPart objectPart - objPart, err = c.uploadPart(bucketName, objectName, uploadID, ioutil.NopCloser(reader), partNumber, - md5Sum, sha256Sum, prtSize) + var objPart ObjectPart + objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, hashSums["md5"], hashSums["sha256"], prtSize) if err != nil { // Reset the temporary buffer upon any error. tmpBuffer.Reset() @@ -158,14 +261,14 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i // Save successfully uploaded size. totalUploadedSize += prtSize + // Increment part number. + partNumber++ + // For unknown size, Read EOF we break away. // We do not have to upload till totalPartsCount. if size < 0 && rErr == io.EOF { break } - - // Increment part number. - partNumber++ } // Verify if we uploaded all the data. @@ -175,19 +278,17 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i } } - // Loop over uploaded parts to save them in a Parts array before completing the multipart request. - for _, part := range partsInfo { - var complPart completePart - complPart.ETag = part.ETag - complPart.PartNumber = part.PartNumber - complMultipartUpload.Parts = append(complMultipartUpload.Parts, complPart) - } - - if size > 0 { - // Verify if totalPartsCount is not equal to total list of parts. - if totalPartsCount != len(complMultipartUpload.Parts) { - return totalUploadedSize, ErrInvalidParts(partNumber, len(complMultipartUpload.Parts)) + // Loop over total uploaded parts to save them in + // Parts array before completing the multipart request. 
+ for i := 1; i < partNumber; i++ { + part, ok := partsInfo[i] + if !ok { + return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i)) } + complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + }) } // Sort all completed parts. @@ -202,7 +303,7 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i } // initiateMultipartUpload - Initiates a multipart upload and returns an upload ID. -func (c Client) initiateMultipartUpload(bucketName, objectName, contentType string) (initiateMultipartUploadResult, error) { +func (c Client) initiateMultipartUpload(bucketName, objectName string, metaData map[string][]string) (initiateMultipartUploadResult, error) { // Input validation. if err := isValidBucketName(bucketName); err != nil { return initiateMultipartUploadResult{}, err @@ -215,13 +316,18 @@ func (c Client) initiateMultipartUpload(bucketName, objectName, contentType stri urlValues := make(url.Values) urlValues.Set("uploads", "") - if contentType == "" { - contentType = "application/octet-stream" - } - // Set ContentType header. customHeader := make(http.Header) - customHeader.Set("Content-Type", contentType) + for k, v := range metaData { + if len(v) > 0 { + customHeader.Set(k, v[0]) + } + } + + // Set a default content-type header if the latter is not provided + if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 { + customHeader.Set("Content-Type", "application/octet-stream") + } reqMetadata := requestMetadata{ bucketName: bucketName, @@ -230,14 +336,8 @@ func (c Client) initiateMultipartUpload(bucketName, objectName, contentType stri customHeader: customHeader, } - // Instantiate the request. - req, err := c.newRequest("POST", reqMetadata) - if err != nil { - return initiateMultipartUploadResult{}, err - } - - // Execute the request. - resp, err := c.do(req) + // Execute POST on an objectName to initiate multipart upload. 
+ resp, err := c.executeMethod("POST", reqMetadata) defer closeResponse(resp) if err != nil { return initiateMultipartUploadResult{}, err @@ -257,25 +357,25 @@ func (c Client) initiateMultipartUpload(bucketName, objectName, contentType stri } // uploadPart - Uploads a part in a multipart upload. -func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.ReadCloser, partNumber int, md5Sum, sha256Sum []byte, size int64) (objectPart, error) { +func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Reader, partNumber int, md5Sum, sha256Sum []byte, size int64) (ObjectPart, error) { // Input validation. if err := isValidBucketName(bucketName); err != nil { - return objectPart{}, err + return ObjectPart{}, err } if err := isValidObjectName(objectName); err != nil { - return objectPart{}, err + return ObjectPart{}, err } if size > maxPartSize { - return objectPart{}, ErrEntityTooLarge(size, maxPartSize, bucketName, objectName) + return ObjectPart{}, ErrEntityTooLarge(size, maxPartSize, bucketName, objectName) } if size <= -1 { - return objectPart{}, ErrEntityTooSmall(size, bucketName, objectName) + return ObjectPart{}, ErrEntityTooSmall(size, bucketName, objectName) } if partNumber <= 0 { - return objectPart{}, ErrInvalidArgument("Part number cannot be negative or equal to zero.") + return ObjectPart{}, ErrInvalidArgument("Part number cannot be negative or equal to zero.") } if uploadID == "" { - return objectPart{}, ErrInvalidArgument("UploadID cannot be empty.") + return ObjectPart{}, ErrInvalidArgument("UploadID cannot be empty.") } // Get resources properly escaped and lined up before using them in http request. @@ -295,24 +395,19 @@ func (c Client) uploadPart(bucketName, objectName, uploadID string, reader io.Re contentSHA256Bytes: sha256Sum, } - // Instantiate a request. - req, err := c.newRequest("PUT", reqMetadata) - if err != nil { - return objectPart{}, err - } - // Execute the request. 
- resp, err := c.do(req) + // Execute PUT on each part. + resp, err := c.executeMethod("PUT", reqMetadata) defer closeResponse(resp) if err != nil { - return objectPart{}, err + return ObjectPart{}, err } if resp != nil { if resp.StatusCode != http.StatusOK { - return objectPart{}, httpRespToErrorResponse(resp, bucketName, objectName) + return ObjectPart{}, httpRespToErrorResponse(resp, bucketName, objectName) } } // Once successfully uploaded, return completed part. - objPart := objectPart{} + objPart := ObjectPart{} objPart.Size = size objPart.PartNumber = partNumber // Trim off the odd double quotes from ETag in the beginning and end. @@ -342,24 +437,18 @@ func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string, } // Instantiate all the complete multipart buffer. - completeMultipartUploadBuffer := bytes.NewBuffer(completeMultipartUploadBytes) + completeMultipartUploadBuffer := bytes.NewReader(completeMultipartUploadBytes) reqMetadata := requestMetadata{ bucketName: bucketName, objectName: objectName, queryValues: urlValues, - contentBody: ioutil.NopCloser(completeMultipartUploadBuffer), - contentLength: int64(completeMultipartUploadBuffer.Len()), - contentSHA256Bytes: sum256(completeMultipartUploadBuffer.Bytes()), - } - - // Instantiate the request. - req, err := c.newRequest("POST", reqMetadata) - if err != nil { - return completeMultipartUploadResult{}, err + contentBody: completeMultipartUploadBuffer, + contentLength: int64(len(completeMultipartUploadBytes)), + contentSHA256Bytes: sum256(completeMultipartUploadBytes), } - // Execute the request. - resp, err := c.do(req) + // Execute POST to complete multipart upload for an objectName. 
+ resp, err := c.executeMethod("POST", reqMetadata) defer closeResponse(resp) if err != nil { return completeMultipartUploadResult{}, err @@ -369,11 +458,32 @@ func (c Client) completeMultipartUpload(bucketName, objectName, uploadID string, return completeMultipartUploadResult{}, httpRespToErrorResponse(resp, bucketName, objectName) } } + + // Read resp.Body into a []bytes to parse for Error response inside the body + var b []byte + b, err = ioutil.ReadAll(resp.Body) + if err != nil { + return completeMultipartUploadResult{}, err + } // Decode completed multipart upload response on success. completeMultipartUploadResult := completeMultipartUploadResult{} - err = xmlDecoder(resp.Body, &completeMultipartUploadResult) + err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadResult) if err != nil { + // xml parsing failure due to presence an ill-formed xml fragment return completeMultipartUploadResult, err + } else if completeMultipartUploadResult.Bucket == "" { + // xml's Decode method ignores well-formed xml that don't apply to the type of value supplied. + // In this case, it would leave completeMultipartUploadResult with the corresponding zero-values + // of the members. 
+ + // Decode completed multipart upload response on failure + completeMultipartUploadErr := ErrorResponse{} + err = xmlDecoder(bytes.NewReader(b), &completeMultipartUploadErr) + if err != nil { + // xml parsing failure due to presence an ill-formed xml fragment + return completeMultipartUploadResult, err + } + return completeMultipartUploadResult, completeMultipartUploadErr } return completeMultipartUploadResult, nil } diff --git a/vendor/github.com/minio/minio-go/api-put-object-progress.go b/vendor/github.com/minio/minio-go/api-put-object-progress.go index ae4425d49..f3844127e 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-progress.go +++ b/vendor/github.com/minio/minio-go/api-put-object-progress.go @@ -16,10 +16,46 @@ package minio -import "io" +import ( + "io" + "strings" -// PutObjectWithProgress - With progress. + "github.com/minio/minio-go/pkg/encrypt" + "github.com/minio/minio-go/pkg/s3utils" +) + +// PutObjectWithProgress - with progress. func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.Reader, contentType string, progress io.Reader) (n int64, err error) { + metaData := make(map[string][]string) + metaData["Content-Type"] = []string{contentType} + return c.PutObjectWithMetadata(bucketName, objectName, reader, metaData, progress) +} + +// PutEncryptedObject - Encrypt and store object. +func (c Client) PutEncryptedObject(bucketName, objectName string, reader io.Reader, encryptMaterials encrypt.Materials, metaData map[string][]string, progress io.Reader) (n int64, err error) { + + if encryptMaterials == nil { + return 0, ErrInvalidArgument("Unable to recognize empty encryption properties") + } + + if err := encryptMaterials.SetupEncryptMode(reader); err != nil { + return 0, err + } + + if metaData == nil { + metaData = make(map[string][]string) + } + + // Set the necessary encryption headers, for future decryption. 
+ metaData[amzHeaderIV] = []string{encryptMaterials.GetIV()} + metaData[amzHeaderKey] = []string{encryptMaterials.GetKey()} + metaData[amzHeaderMatDesc] = []string{encryptMaterials.GetDesc()} + + return c.PutObjectWithMetadata(bucketName, objectName, encryptMaterials, metaData, progress) +} + +// PutObjectWithMetadata - with metadata. +func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.Reader, metaData map[string][]string, progress io.Reader) (n int64, err error) { // Input validation. if err := isValidBucketName(bucketName); err != nil { return 0, err @@ -47,7 +83,7 @@ func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.R // NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT. // So we fall back to single PUT operation with the maximum limit of 5GiB. - if isGoogleEndpoint(c.endpointURL) { + if s3utils.IsGoogleEndpoint(c.endpointURL) { if size <= -1 { return 0, ErrorResponse{ Code: "NotImplemented", @@ -60,46 +96,106 @@ func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.R return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) } // Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size. - return c.putObjectNoChecksum(bucketName, objectName, reader, size, contentType, progress) + return c.putObjectNoChecksum(bucketName, objectName, reader, size, metaData, progress) } - // NOTE: S3 doesn't allow anonymous multipart requests. - if isAmazonEndpoint(c.endpointURL) && c.anonymous { - if size <= -1 { - return 0, ErrorResponse{ - Code: "NotImplemented", - Message: "Content-Length cannot be negative for anonymous requests.", - Key: objectName, - BucketName: bucketName, + // putSmall object. + if size < minPartSize && size >= 0 { + return c.putObjectSingle(bucketName, objectName, reader, size, metaData, progress) + } + // For all sizes greater than 5MiB do multipart. 
+ n, err = c.putObjectMultipart(bucketName, objectName, reader, size, metaData, progress) + if err != nil { + errResp := ToErrorResponse(err) + // Verify if multipart functionality is not available, if not + // fall back to single PutObject operation. + if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") { + // Verify if size of reader is greater than '5GiB'. + if size > maxSinglePutObjectSize { + return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) } + // Fall back to uploading as single PutObject operation. + return c.putObjectSingle(bucketName, objectName, reader, size, metaData, progress) } - if size > maxSinglePutObjectSize { - return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) + return n, err + } + return n, nil +} + +// PutObjectStreaming using AWS streaming signature V4 +func (c Client) PutObjectStreaming(bucketName, objectName string, reader io.Reader) (n int64, err error) { + return c.PutObjectStreamingWithProgress(bucketName, objectName, reader, nil, nil) +} + +// PutObjectStreamingWithMetadata using AWS streaming signature V4 +func (c Client) PutObjectStreamingWithMetadata(bucketName, objectName string, reader io.Reader, metadata map[string][]string) (n int64, err error) { + return c.PutObjectStreamingWithProgress(bucketName, objectName, reader, metadata, nil) +} + +// PutObjectStreamingWithProgress using AWS streaming signature V4 +func (c Client) PutObjectStreamingWithProgress(bucketName, objectName string, reader io.Reader, metadata map[string][]string, progress io.Reader) (n int64, err error) { + // NOTE: Streaming signature is not supported by GCS. + if s3utils.IsGoogleEndpoint(c.endpointURL) { + return 0, ErrorResponse{ + Code: "NotImplemented", + Message: "AWS streaming signature v4 is not supported with Google Cloud Storage", + Key: objectName, + BucketName: bucketName, + } + } + // This method should return error with signature v2 minioClient. 
+ if c.signature.isV2() { + return 0, ErrorResponse{ + Code: "NotImplemented", + Message: "AWS streaming signature v4 is not supported with minio client initialized for AWS signature v2", + Key: objectName, + BucketName: bucketName, } - // Do not compute MD5 for anonymous requests to Amazon - // S3. Uploads up to 5GiB in size. - return c.putObjectNoChecksum(bucketName, objectName, reader, size, contentType, progress) } - // putSmall object. + // Size of the object. + var size int64 + + // Get reader size. + size, err = getReaderSize(reader) + if err != nil { + return 0, err + } + + // Check for largest object size allowed. + if size > int64(maxMultipartPutObjectSize) { + return 0, ErrEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName) + } + + // If size cannot be found on a stream, it is not possible + // to upload using streaming signature, fall back to multipart. + if size < 0 { + return c.putObjectMultipartStream(bucketName, objectName, reader, size, metadata, progress) + } + + // Set signature type to streaming signature v4. + c.signature = SignatureV4Streaming + if size < minPartSize && size >= 0 { - return c.putObjectSingle(bucketName, objectName, reader, size, contentType, progress) + return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress) } - // For all sizes greater than 5MiB do multipart. - n, err = c.putObjectMultipart(bucketName, objectName, reader, size, contentType, progress) + + // For all sizes greater than 64MiB do multipart. + n, err = c.putObjectMultipartStreamNoChecksum(bucketName, objectName, reader, size, metadata, progress) if err != nil { errResp := ToErrorResponse(err) // Verify if multipart functionality is not available, if not // fall back to single PutObject operation. - if errResp.Code == "NotImplemented" { + if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") { // Verify if size of reader is greater than '5GiB'. 
if size > maxSinglePutObjectSize { return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) } // Fall back to uploading as single PutObject operation. - return c.putObjectSingle(bucketName, objectName, reader, size, contentType, progress) + return c.putObjectNoChecksum(bucketName, objectName, reader, size, metadata, progress) } return n, err } + return n, nil } diff --git a/vendor/github.com/minio/minio-go/api-put-object-readat.go b/vendor/github.com/minio/minio-go/api-put-object-readat.go index ddb1ab3dc..ebf422638 100644 --- a/vendor/github.com/minio/minio-go/api-put-object-readat.go +++ b/vendor/github.com/minio/minio-go/api-put-object-readat.go @@ -1,5 +1,5 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,20 +18,36 @@ package minio import ( "bytes" + "crypto/md5" + "crypto/sha256" + "fmt" + "hash" "io" "io/ioutil" "sort" ) +// uploadedPartRes - the response received from a part upload. +type uploadedPartRes struct { + Error error // Any error encountered while uploading the part. + PartNum int // Number of the part uploaded. + Size int64 // Size of the part uploaded. + Part *ObjectPart +} + +type uploadPartReq struct { + PartNum int // Number of the part uploaded. + Part *ObjectPart // Size of the part uploaded. +} + // shouldUploadPartReadAt - verify if part should be uploaded. -func shouldUploadPartReadAt(objPart objectPart, objectParts map[int]objectPart) bool { +func shouldUploadPartReadAt(objPart ObjectPart, uploadReq uploadPartReq) bool { // If part not found part should be uploaded. - uploadedPart, found := objectParts[objPart.PartNumber] - if !found { + if uploadReq.Part == nil { return true } // if size mismatches part should be uploaded. 
- if uploadedPart.Size != objPart.Size { + if uploadReq.Part.Size != objPart.Size { return true } return false @@ -47,7 +63,7 @@ func shouldUploadPartReadAt(objPart objectPart, objectParts map[int]objectPart) // temporary files for staging all the data, these temporary files are // cleaned automatically when the caller i.e http client closes the // stream after uploading all the contents successfully. -func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, reader io.ReaderAt, size int64, contentType string, progress io.Reader) (n int64, err error) { +func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, reader io.ReaderAt, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) { // Input validation. if err := isValidBucketName(bucketName); err != nil { return 0, err @@ -56,9 +72,8 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read return 0, err } - // Get upload id for an object, initiates a new multipart request - // if it cannot find any previously partially uploaded object. - uploadID, isNew, err := c.getUploadID(bucketName, objectName, contentType) + // Get the upload id of a previously partially uploaded object or initiate a new multipart upload + uploadID, partsInfo, err := c.getMpartUploadSession(bucketName, objectName, metaData) if err != nil { return 0, err } @@ -69,121 +84,150 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read // Complete multipart upload. var complMultipartUpload completeMultipartUpload - // A map of all uploaded parts. - var partsInfo = make(map[int]objectPart) - - // Fetch all parts info previously uploaded. - if !isNew { - partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID) - if err != nil { - return 0, err - } - } - // Calculate the optimal parts info for a given size. 
totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size) if err != nil { return 0, err } - // Used for readability, lastPartNumber is always - // totalPartsCount. + // Used for readability, lastPartNumber is always totalPartsCount. lastPartNumber := totalPartsCount - // partNumber always starts with '1'. - partNumber := 1 + // Declare a channel that sends the next part number to be uploaded. + // Buffered to 10000 because thats the maximum number of parts allowed + // by S3. + uploadPartsCh := make(chan uploadPartReq, 10000) + + // Declare a channel that sends back the response of a part upload. + // Buffered to 10000 because thats the maximum number of parts allowed + // by S3. + uploadedPartsCh := make(chan uploadedPartRes, 10000) + + // Send each part number to the channel to be processed. + for p := 1; p <= totalPartsCount; p++ { + part, ok := partsInfo[p] + if ok { + uploadPartsCh <- uploadPartReq{PartNum: p, Part: &part} + } else { + uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil} + } + } + close(uploadPartsCh) + + // Receive each part number from the channel allowing three parallel uploads. + for w := 1; w <= totalWorkers; w++ { + go func() { + // Read defaults to reading at 5MiB buffer. + readAtBuffer := make([]byte, optimalReadBufferSize) + + // Each worker will draw from the part channel and upload in parallel. + for uploadReq := range uploadPartsCh { + // Declare a new tmpBuffer. + tmpBuffer := new(bytes.Buffer) + + // If partNumber was not uploaded we calculate the missing + // part offset and size. For all other part numbers we + // calculate offset based on multiples of partSize. + readOffset := int64(uploadReq.PartNum-1) * partSize + missingPartSize := partSize + + // As a special case if partNumber is lastPartNumber, we + // calculate the offset based on the last part size. + if uploadReq.PartNum == lastPartNumber { + readOffset = (size - lastPartSize) + missingPartSize = lastPartSize + } - // Initialize a temporary buffer. 
- tmpBuffer := new(bytes.Buffer) + // Get a section reader on a particular offset. + sectionReader := io.NewSectionReader(reader, readOffset, missingPartSize) - // Read defaults to reading at 5MiB buffer. - readBuffer := make([]byte, optimalReadBufferSize) + // Choose the needed hash algorithms to be calculated by hashCopyBuffer. + // Sha256 is avoided in non-v4 signature requests or HTTPS connections + hashSums := make(map[string][]byte) + hashAlgos := make(map[string]hash.Hash) + hashAlgos["md5"] = md5.New() + if c.signature.isV4() && !c.secure { + hashAlgos["sha256"] = sha256.New() + } - // Upload all the missing parts. - for partNumber <= lastPartNumber { - // Verify object if its uploaded. - verifyObjPart := objectPart{ - PartNumber: partNumber, - Size: partSize, - } - // Special case if we see a last part number, save last part - // size as the proper part size. - if partNumber == lastPartNumber { - verifyObjPart = objectPart{ - PartNumber: lastPartNumber, - Size: lastPartSize, - } - } + var prtSize int64 + var err error + prtSize, err = hashCopyBuffer(hashAlgos, hashSums, tmpBuffer, sectionReader, readAtBuffer) + if err != nil { + // Send the error back through the channel. + uploadedPartsCh <- uploadedPartRes{ + Size: 0, + Error: err, + } + // Exit the goroutine. + return + } - // Verify if part should be uploaded. - if !shouldUploadPartReadAt(verifyObjPart, partsInfo) { - // Increment part number when not uploaded. - partNumber++ - if progress != nil { - // Update the progress reader for the skipped part. - if _, err = io.CopyN(ioutil.Discard, progress, verifyObjPart.Size); err != nil { - return 0, err + // Verify object if its uploaded. + verifyObjPart := ObjectPart{ + PartNumber: uploadReq.PartNum, + Size: partSize, + } + // Special case if we see a last part number, save last part + // size as the proper part size. + if uploadReq.PartNum == lastPartNumber { + verifyObjPart.Size = lastPartSize + } + + // Only upload the necessary parts. 
Otherwise return size through channel + // to update any progress bar. + if shouldUploadPartReadAt(verifyObjPart, uploadReq) { + // Proceed to upload the part. + var objPart ObjectPart + objPart, err = c.uploadPart(bucketName, objectName, uploadID, tmpBuffer, uploadReq.PartNum, hashSums["md5"], hashSums["sha256"], prtSize) + if err != nil { + uploadedPartsCh <- uploadedPartRes{ + Size: 0, + Error: err, + } + // Exit the goroutine. + return + } + // Save successfully uploaded part metadata. + uploadReq.Part = &objPart + } + // Send successful part info through the channel. + uploadedPartsCh <- uploadedPartRes{ + Size: verifyObjPart.Size, + PartNum: uploadReq.PartNum, + Part: uploadReq.Part, + Error: nil, } } - continue - } + }() + } - // If partNumber was not uploaded we calculate the missing - // part offset and size. For all other part numbers we - // calculate offset based on multiples of partSize. - readOffset := int64(partNumber-1) * partSize - missingPartSize := partSize - - // As a special case if partNumber is lastPartNumber, we - // calculate the offset based on the last part size. - if partNumber == lastPartNumber { - readOffset = (size - lastPartSize) - missingPartSize = lastPartSize + // Gather the responses as they occur and update any + // progress bar. + for u := 1; u <= totalPartsCount; u++ { + uploadRes := <-uploadedPartsCh + if uploadRes.Error != nil { + return totalUploadedSize, uploadRes.Error } - - // Get a section reader on a particular offset. - sectionReader := io.NewSectionReader(reader, readOffset, missingPartSize) - - // Calculates MD5 and SHA256 sum for a section reader. - var md5Sum, sha256Sum []byte - var prtSize int64 - md5Sum, sha256Sum, prtSize, err = c.hashCopyBuffer(tmpBuffer, sectionReader, readBuffer) - if err != nil { - return 0, err + // Retrieve each uploaded part and store it to be completed. 
+ // part, ok := partsInfo[uploadRes.PartNum] + part := uploadRes.Part + if part == nil { + return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum)) } - - var reader io.Reader - // Update progress reader appropriately to the latest offset - // as we read from the source. - reader = newHook(tmpBuffer, progress) - - // Proceed to upload the part. - var objPart objectPart - objPart, err = c.uploadPart(bucketName, objectName, uploadID, ioutil.NopCloser(reader), - partNumber, md5Sum, sha256Sum, prtSize) - if err != nil { - // Reset the buffer upon any error. - tmpBuffer.Reset() - return 0, err + // Update the totalUploadedSize. + totalUploadedSize += uploadRes.Size + // Update the progress bar if there is one. + if progress != nil { + if _, err = io.CopyN(ioutil.Discard, progress, uploadRes.Size); err != nil { + return totalUploadedSize, err + } } - - // Save successfully uploaded part metadata. - partsInfo[partNumber] = objPart - - // Increment part number here after successful part upload. - partNumber++ - - // Reset the buffer. - tmpBuffer.Reset() - } - - // Loop over uploaded parts to save them in a Parts array before completing the multipart request. - for _, part := range partsInfo { - var complPart completePart - complPart.ETag = part.ETag - complPart.PartNumber = part.PartNumber - totalUploadedSize += part.Size - complMultipartUpload.Parts = append(complMultipartUpload.Parts, complPart) + // Store the parts to be completed in order. + complMultipartUpload.Parts = append(complMultipartUpload.Parts, CompletePart{ + ETag: part.ETag, + PartNumber: part.PartNumber, + }) } // Verify if we uploaded all the data. @@ -191,11 +235,6 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName) } - // Verify if totalPartsCount is not equal to total list of parts. 
- if totalPartsCount != len(complMultipartUpload.Parts) { - return totalUploadedSize, ErrInvalidParts(totalPartsCount, len(complMultipartUpload.Parts)) - } - // Sort all completed parts. sort.Sort(completedParts(complMultipartUpload.Parts)) _, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload) diff --git a/vendor/github.com/minio/minio-go/api-put-object.go b/vendor/github.com/minio/minio-go/api-put-object.go index a09e658f4..e218075df 100644 --- a/vendor/github.com/minio/minio-go/api-put-object.go +++ b/vendor/github.com/minio/minio-go/api-put-object.go @@ -1,5 +1,5 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -18,6 +18,9 @@ package minio import ( "bytes" + "crypto/md5" + "crypto/sha256" + "hash" "io" "io/ioutil" "net/http" @@ -27,78 +30,93 @@ import ( "strings" ) +// toInt - converts go value to its integer representation based +// on the value kind if it is an integer. +func toInt(value reflect.Value) (size int64) { + size = -1 + if value.IsValid() { + switch value.Kind() { + case reflect.Int: + fallthrough + case reflect.Int8: + fallthrough + case reflect.Int16: + fallthrough + case reflect.Int32: + fallthrough + case reflect.Int64: + size = value.Int() + } + } + return size +} + // getReaderSize - Determine the size of Reader if available. func getReaderSize(reader io.Reader) (size int64, err error) { - var result []reflect.Value size = -1 - if reader != nil { - // Verify if there is a method by name 'Size'. - lenFn := reflect.ValueOf(reader).MethodByName("Size") - if lenFn.IsValid() { - if lenFn.Kind() == reflect.Func { - // Call the 'Size' function and save its return value. 
- result = lenFn.Call([]reflect.Value{}) - if result != nil && len(result) == 1 { - lenValue := result[0] - if lenValue.IsValid() { - switch lenValue.Kind() { - case reflect.Int: - fallthrough - case reflect.Int8: - fallthrough - case reflect.Int16: - fallthrough - case reflect.Int32: - fallthrough - case reflect.Int64: - size = lenValue.Int() - } - } - } + if reader == nil { + return -1, nil + } + // Verify if there is a method by name 'Size'. + sizeFn := reflect.ValueOf(reader).MethodByName("Size") + // Verify if there is a method by name 'Len'. + lenFn := reflect.ValueOf(reader).MethodByName("Len") + if sizeFn.IsValid() { + if sizeFn.Kind() == reflect.Func { + // Call the 'Size' function and save its return value. + result := sizeFn.Call([]reflect.Value{}) + if len(result) == 1 { + size = toInt(result[0]) + } + } + } else if lenFn.IsValid() { + if lenFn.Kind() == reflect.Func { + // Call the 'Len' function and save its return value. + result := lenFn.Call([]reflect.Value{}) + if len(result) == 1 { + size = toInt(result[0]) } - } else { - // Fallback to Stat() method, two possible Stat() structs - // exist. - switch v := reader.(type) { - case *os.File: - var st os.FileInfo - st, err = v.Stat() - if err != nil { - // Handle this case specially for "windows", - // certain files for example 'Stdin', 'Stdout' and - // 'Stderr' it is not allowed to fetch file information. - if runtime.GOOS == "windows" { - if strings.Contains(err.Error(), "GetFileInformationByHandle") { - return -1, nil - } + } + } else { + // Fallback to Stat() method, two possible Stat() structs exist. + switch v := reader.(type) { + case *os.File: + var st os.FileInfo + st, err = v.Stat() + if err != nil { + // Handle this case specially for "windows", + // certain files for example 'Stdin', 'Stdout' and + // 'Stderr' it is not allowed to fetch file information. 
+ if runtime.GOOS == "windows" { + if strings.Contains(err.Error(), "GetFileInformationByHandle") { + return -1, nil } - return - } - // Ignore if input is a directory, throw an error. - if st.Mode().IsDir() { - return -1, ErrInvalidArgument("Input file cannot be a directory.") } - // Ignore 'Stdin', 'Stdout' and 'Stderr', since they - // represent *os.File type but internally do not - // implement Seekable calls. Ignore them and treat - // them like a stream with unknown length. - switch st.Name() { - case "stdin": - fallthrough - case "stdout": - fallthrough - case "stderr": - return - } - size = st.Size() - case *Object: - var st ObjectInfo - st, err = v.Stat() - if err != nil { - return - } - size = st.Size + return + } + // Ignore if input is a directory, throw an error. + if st.Mode().IsDir() { + return -1, ErrInvalidArgument("Input file cannot be a directory.") } + // Ignore 'Stdin', 'Stdout' and 'Stderr', since they + // represent *os.File type but internally do not + // implement Seekable calls. Ignore them and treat + // them like a stream with unknown length. + switch st.Name() { + case "stdin", "stdout", "stderr": + return + // Ignore read/write stream of os.Pipe() which have unknown length too. + case "|0", "|1": + return + } + size = st.Size() + case *Object: + var st ObjectInfo + st, err = v.Stat() + if err != nil { + return + } + size = st.Size } } // Returns the size here. @@ -107,7 +125,7 @@ func getReaderSize(reader io.Reader) (size int64, err error) { // completedParts is a collection of parts sortable by their part numbers. // used for sorting the uploaded parts before completing the multipart request. 
-type completedParts []completePart +type completedParts []CompletePart func (a completedParts) Len() int { return len(a) } func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } @@ -125,14 +143,13 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part // NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT. // So we fall back to single PUT operation with the maximum limit of 5GiB. // -// NOTE: For anonymous requests Amazon S3 doesn't allow multipart upload. So we fall back to single PUT operation. func (c Client) PutObject(bucketName, objectName string, reader io.Reader, contentType string) (n int64, err error) { return c.PutObjectWithProgress(bucketName, objectName, reader, contentType, nil) } // putObjectNoChecksum special function used Google Cloud Storage. This special function // is used for Google Cloud Storage since Google's multipart API is not S3 compatible. -func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) { +func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) { // Input validation. if err := isValidBucketName(bucketName); err != nil { return 0, err @@ -146,11 +163,11 @@ func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Rea // Update progress reader appropriately to the latest offset as we // read from the source. - reader = newHook(reader, progress) + readSeeker := newHook(reader, progress) // This function does not calculate sha256 and md5sum for payload. // Execute put object. 
- st, err := c.putObjectDo(bucketName, objectName, ioutil.NopCloser(reader), nil, nil, size, contentType) + st, err := c.putObjectDo(bucketName, objectName, readSeeker, nil, nil, size, metaData) if err != nil { return 0, err } @@ -162,7 +179,7 @@ func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Rea // putObjectSingle is a special function for uploading single put object request. // This special function is used as a fallback when multipart upload fails. -func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) { +func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) { // Input validation. if err := isValidBucketName(bucketName); err != nil { return 0, err @@ -177,13 +194,22 @@ func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader, if size <= -1 { size = maxSinglePutObjectSize } - var md5Sum, sha256Sum []byte - var readCloser io.ReadCloser + + // Add the appropriate hash algorithms that need to be calculated by hashCopyN + // In case of non-v4 signature request or HTTPS connection, sha256 is not needed. + hashAlgos := make(map[string]hash.Hash) + hashSums := make(map[string][]byte) + hashAlgos["md5"] = md5.New() + if c.signature.isV4() && !c.secure { + hashAlgos["sha256"] = sha256.New() + } + if size <= minPartSize { // Initialize a new temporary buffer. tmpBuffer := new(bytes.Buffer) - md5Sum, sha256Sum, size, err = c.hashCopyN(tmpBuffer, reader, size) - readCloser = ioutil.NopCloser(tmpBuffer) + size, err = hashCopyN(hashAlgos, hashSums, tmpBuffer, reader, size) + reader = bytes.NewReader(tmpBuffer.Bytes()) + tmpBuffer.Reset() } else { // Initialize a new temporary file. 
var tmpFile *tempFile @@ -191,39 +217,41 @@ func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader, if err != nil { return 0, err } - md5Sum, sha256Sum, size, err = c.hashCopyN(tmpFile, reader, size) + defer tmpFile.Close() + size, err = hashCopyN(hashAlgos, hashSums, tmpFile, reader, size) + if err != nil { + return 0, err + } // Seek back to beginning of the temporary file. if _, err = tmpFile.Seek(0, 0); err != nil { return 0, err } - readCloser = tmpFile + reader = tmpFile } // Return error if its not io.EOF. - if err != nil { - if err != io.EOF { - return 0, err - } - } - // Progress the reader to the size. - if progress != nil { - if _, err = io.CopyN(ioutil.Discard, progress, size); err != nil { - return size, err - } + if err != nil && err != io.EOF { + return 0, err } // Execute put object. - st, err := c.putObjectDo(bucketName, objectName, readCloser, md5Sum, sha256Sum, size, contentType) + st, err := c.putObjectDo(bucketName, objectName, reader, hashSums["md5"], hashSums["sha256"], size, metaData) if err != nil { return 0, err } if st.Size != size { return 0, ErrUnexpectedEOF(st.Size, size, bucketName, objectName) } + // Progress the reader to the size if putObjectDo is successful. + if progress != nil { + if _, err = io.CopyN(ioutil.Discard, progress, size); err != nil { + return size, err + } + } return size, nil } // putObjectDo - executes the put object http operation. // NOTE: You must have WRITE permissions on a bucket to add an object to it. -func (c Client) putObjectDo(bucketName, objectName string, reader io.ReadCloser, md5Sum []byte, sha256Sum []byte, size int64, contentType string) (ObjectInfo, error) { +func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, metaData map[string][]string) (ObjectInfo, error) { // Input validation. 
if err := isValidBucketName(bucketName); err != nil { return ObjectInfo{}, err @@ -240,13 +268,20 @@ func (c Client) putObjectDo(bucketName, objectName string, reader io.ReadCloser, return ObjectInfo{}, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName) } - if strings.TrimSpace(contentType) == "" { - contentType = "application/octet-stream" - } - // Set headers. customHeader := make(http.Header) - customHeader.Set("Content-Type", contentType) + + // Set metadata to headers + for k, v := range metaData { + if len(v) > 0 { + customHeader.Set(k, v[0]) + } + } + + // If Content-Type is not provided, set the default application/octet-stream one + if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 { + customHeader.Set("Content-Type", "application/octet-stream") + } // Populate request metadata. reqMetadata := requestMetadata{ @@ -258,13 +293,9 @@ func (c Client) putObjectDo(bucketName, objectName string, reader io.ReadCloser, contentMD5Bytes: md5Sum, contentSHA256Bytes: sha256Sum, } - // Initiate new request. - req, err := c.newRequest("PUT", reqMetadata) - if err != nil { - return ObjectInfo{}, err - } - // Execute the request. - resp, err := c.do(req) + + // Execute PUT an objectName. + resp, err := c.executeMethod("PUT", reqMetadata) defer closeResponse(resp) if err != nil { return ObjectInfo{}, err @@ -275,13 +306,13 @@ func (c Client) putObjectDo(bucketName, objectName string, reader io.ReadCloser, } } - var metadata ObjectInfo + var objInfo ObjectInfo // Trim off the odd double quotes from ETag in the beginning and end. - metadata.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"") - metadata.ETag = strings.TrimSuffix(metadata.ETag, "\"") + objInfo.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"") + objInfo.ETag = strings.TrimSuffix(objInfo.ETag, "\"") // A success here means data was written to server successfully. - metadata.Size = size + objInfo.Size = size // Return here. 
- return metadata, nil + return objInfo, nil } diff --git a/vendor/github.com/minio/minio-go/api-remove.go b/vendor/github.com/minio/minio-go/api-remove.go index 8f59c15e6..73790f002 100644 --- a/vendor/github.com/minio/minio-go/api-remove.go +++ b/vendor/github.com/minio/minio-go/api-remove.go @@ -1,5 +1,5 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,6 +17,9 @@ package minio import ( + "bytes" + "encoding/xml" + "io" "net/http" "net/url" ) @@ -30,15 +33,11 @@ func (c Client) RemoveBucket(bucketName string) error { if err := isValidBucketName(bucketName); err != nil { return err } - // Instantiate a new request. - req, err := c.newRequest("DELETE", requestMetadata{ - bucketName: bucketName, + // Execute DELETE on bucket. + resp, err := c.executeMethod("DELETE", requestMetadata{ + bucketName: bucketName, + contentSHA256Bytes: emptySHA256, }) - if err != nil { - return err - } - // Initiate the request. - resp, err := c.do(req) defer closeResponse(resp) if err != nil { return err @@ -64,28 +63,153 @@ func (c Client) RemoveObject(bucketName, objectName string) error { if err := isValidObjectName(objectName); err != nil { return err } - // Instantiate the request. - req, err := c.newRequest("DELETE", requestMetadata{ - bucketName: bucketName, - objectName: objectName, + // Execute DELETE on objectName. + resp, err := c.executeMethod("DELETE", requestMetadata{ + bucketName: bucketName, + objectName: objectName, + contentSHA256Bytes: emptySHA256, }) - if err != nil { - return err - } - // Initiate the request. 
- resp, err := c.do(req) defer closeResponse(resp) if err != nil { return err } + if resp != nil { + // if some unexpected error happened and max retry is reached, we want to let client know + if resp.StatusCode != http.StatusNoContent { + return httpRespToErrorResponse(resp, bucketName, objectName) + } + } + // DeleteObject always responds with http '204' even for // objects which do not exist. So no need to handle them // specifically. return nil } +// RemoveObjectError - container of Multi Delete S3 API error +type RemoveObjectError struct { + ObjectName string + Err error +} + +// generateRemoveMultiObjects - generate the XML request for remove multi objects request +func generateRemoveMultiObjectsRequest(objects []string) []byte { + rmObjects := []deleteObject{} + for _, obj := range objects { + rmObjects = append(rmObjects, deleteObject{Key: obj}) + } + xmlBytes, _ := xml.Marshal(deleteMultiObjects{Objects: rmObjects, Quiet: true}) + return xmlBytes +} + +// processRemoveMultiObjectsResponse - parse the remove multi objects web service +// and return the success/failure result status for each object +func processRemoveMultiObjectsResponse(body io.Reader, objects []string, errorCh chan<- RemoveObjectError) { + // Parse multi delete XML response + rmResult := &deleteMultiObjectsResult{} + err := xmlDecoder(body, rmResult) + if err != nil { + errorCh <- RemoveObjectError{ObjectName: "", Err: err} + return + } + + // Fill deletion that returned an error. + for _, obj := range rmResult.UnDeletedObjects { + errorCh <- RemoveObjectError{ + ObjectName: obj.Key, + Err: ErrorResponse{ + Code: obj.Code, + Message: obj.Message, + }, + } + } +} + +// RemoveObjects remove multiples objects from a bucket. +// The list of objects to remove are received from objectsCh. +// Remove failures are sent back via error channel. 
+func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan RemoveObjectError { + errorCh := make(chan RemoveObjectError, 1) + + // Validate if bucket name is valid. + if err := isValidBucketName(bucketName); err != nil { + defer close(errorCh) + errorCh <- RemoveObjectError{ + Err: err, + } + return errorCh + } + // Validate objects channel to be properly allocated. + if objectsCh == nil { + defer close(errorCh) + errorCh <- RemoveObjectError{ + Err: ErrInvalidArgument("Objects channel cannot be nil"), + } + return errorCh + } + + // Generate and call MultiDelete S3 requests based on entries received from objectsCh + go func(errorCh chan<- RemoveObjectError) { + maxEntries := 1000 + finish := false + urlValues := make(url.Values) + urlValues.Set("delete", "") + + // Close error channel when Multi delete finishes. + defer close(errorCh) + + // Loop over entries by 1000 and call MultiDelete requests + for { + if finish { + break + } + count := 0 + var batch []string + + // Try to gather 1000 entries + for object := range objectsCh { + batch = append(batch, object) + if count++; count >= maxEntries { + break + } + } + if count == 0 { + // Multi Objects Delete API doesn't accept empty object list, quit immediately + break + } + if count < maxEntries { + // We didn't have 1000 entries, so this is the last batch + finish = true + } + + // Generate remove multi objects XML request + removeBytes := generateRemoveMultiObjectsRequest(batch) + // Execute POST on bucket to delete the batch of objects.
+ resp, err := c.executeMethod("POST", requestMetadata{ + bucketName: bucketName, + queryValues: urlValues, + contentBody: bytes.NewReader(removeBytes), + contentLength: int64(len(removeBytes)), + contentMD5Bytes: sumMD5(removeBytes), + contentSHA256Bytes: sum256(removeBytes), + }) + if err != nil { + for _, b := range batch { + errorCh <- RemoveObjectError{ObjectName: b, Err: err} + } + continue + } + + // Process multiobjects remove xml response + processRemoveMultiObjectsResponse(resp.Body, batch, errorCh) + + closeResponse(resp) + } + }(errorCh) + return errorCh +} + // RemoveIncompleteUpload aborts an partially uploaded object. -// Requires explicit authentication, no anonymous requests are allowed for multipart API. func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error { // Input validation. if err := isValidBucketName(bucketName); err != nil { @@ -124,18 +248,13 @@ func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) er urlValues := make(url.Values) urlValues.Set("uploadId", uploadID) - // Instantiate a new DELETE request. - req, err := c.newRequest("DELETE", requestMetadata{ - bucketName: bucketName, - objectName: objectName, - queryValues: urlValues, + // Execute DELETE on multipart upload. + resp, err := c.executeMethod("DELETE", requestMetadata{ + bucketName: bucketName, + objectName: objectName, + queryValues: urlValues, + contentSHA256Bytes: emptySHA256, }) - if err != nil { - return err - } - - // Initiate the request. - resp, err := c.do(req) defer closeResponse(resp) if err != nil { return err @@ -149,13 +268,13 @@ func (c Client) abortMultipartUpload(bucketName, objectName, uploadID string) er // This is needed specifically for abort and it cannot // be converged into default case. 
errorResponse = ErrorResponse{ - Code: "NoSuchUpload", - Message: "The specified multipart upload does not exist.", - BucketName: bucketName, - Key: objectName, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), + Code: "NoSuchUpload", + Message: "The specified multipart upload does not exist.", + BucketName: bucketName, + Key: objectName, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + Region: resp.Header.Get("x-amz-bucket-region"), } default: return httpRespToErrorResponse(resp, bucketName, objectName) diff --git a/vendor/github.com/minio/minio-go/api-s3-definitions.go b/vendor/github.com/minio/minio-go/api-s3-datatypes.go similarity index 62% rename from vendor/github.com/minio/minio-go/api-s3-definitions.go rename to vendor/github.com/minio/minio-go/api-s3-datatypes.go index de562e475..ec63d6b94 100644 --- a/vendor/github.com/minio/minio-go/api-s3-definitions.go +++ b/vendor/github.com/minio/minio-go/api-s3-datatypes.go @@ -41,8 +41,37 @@ type commonPrefix struct { Prefix string } -// listBucketResult container for listObjects response. -type listBucketResult struct { +// ListBucketV2Result container for listObjects response version 2. +type ListBucketV2Result struct { + // A response can contain CommonPrefixes only if you have + // specified a delimiter. + CommonPrefixes []commonPrefix + // Metadata about each object returned. + Contents []ObjectInfo + Delimiter string + + // Encoding type used to encode object keys in the response. + EncodingType string + + // A flag that indicates whether or not ListObjects returned all of the results + // that satisfied the search criteria. 
+ IsTruncated bool + MaxKeys int64 + Name string + + // Hold the token that will be sent in the next request to fetch the next group of keys + NextContinuationToken string + + ContinuationToken string + Prefix string + + // FetchOwner and StartAfter are currently not used + FetchOwner string + StartAfter string +} + +// ListBucketResult container for listObjects response. +type ListBucketResult struct { // A response can contain CommonPrefixes only if you have // specified a delimiter. CommonPrefixes []commonPrefix @@ -73,8 +102,8 @@ type listBucketResult struct { Prefix string } -// listMultipartUploadsResult container for ListMultipartUploads response -type listMultipartUploadsResult struct { +// ListMultipartUploadsResult container for ListMultipartUploads response +type ListMultipartUploadsResult struct { Bucket string KeyMarker string UploadIDMarker string `xml:"UploadIdMarker"` @@ -96,8 +125,14 @@ type initiator struct { DisplayName string } -// objectPart container for particular part of an object. -type objectPart struct { +// copyObjectResult container for copy object response. +type copyObjectResult struct { + ETag string + LastModified string // time string format "2006-01-02T15:04:05.000Z" +} + +// ObjectPart container for particular part of an object. +type ObjectPart struct { // Part number identifies the part. PartNumber int @@ -112,8 +147,8 @@ type objectPart struct { Size int64 } -// listObjectPartsResult container for ListObjectParts response. -type listObjectPartsResult struct { +// ListObjectPartsResult container for ListObjectParts response. +type ListObjectPartsResult struct { Bucket string Key string UploadID string `xml:"UploadId"` @@ -128,7 +163,7 @@ type listObjectPartsResult struct { // Indicates whether the returned list of parts is truncated. 
IsTruncated bool - ObjectParts []objectPart `xml:"Part"` + ObjectParts []ObjectPart `xml:"Part"` EncodingType string } @@ -150,9 +185,9 @@ type completeMultipartUploadResult struct { ETag string } -// completePart sub container lists individual part numbers and their +// CompletePart sub container lists individual part numbers and their // md5sum, part of completeMultipartUpload. -type completePart struct { +type CompletePart struct { XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Part" json:"-"` // Part number identifies the part. @@ -163,7 +198,7 @@ type completePart struct { // completeMultipartUpload container for completing multipart upload. type completeMultipartUpload struct { XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"` - Parts []completePart `xml:"Part"` + Parts []CompletePart `xml:"Part"` } // createBucketConfiguration container for bucket configuration. @@ -172,26 +207,38 @@ type createBucketConfiguration struct { Location string `xml:"LocationConstraint"` } -// grant container for the grantee and his or her permissions. -type grant struct { - // grantee container for DisplayName and ID of the person being - // granted permissions. - Grantee struct { - ID string - DisplayName string - EmailAddress string - Type string - URI string - } - Permission string +// deleteObject container for Delete element in MultiObjects Delete XML request +type deleteObject struct { + Key string + VersionID string `xml:"VersionId,omitempty"` } -// accessControlPolicy contains the elements providing ACL permissions -// for a bucket. -type accessControlPolicy struct { - // accessControlList container for ACL information. - AccessControlList struct { - Grant []grant - } - Owner owner +// deletedObject container for Deleted element in MultiObjects Delete XML response +type deletedObject struct { + Key string + VersionID string `xml:"VersionId,omitempty"` + // These fields are ignored. 
+ DeleteMarker bool + DeleteMarkerVersionID string +} + +// nonDeletedObject container for Error element (failed deletion) in MultiObjects Delete XML response +type nonDeletedObject struct { + Key string + Code string + Message string +} + +// deletedMultiObjects container for MultiObjects Delete XML request +type deleteMultiObjects struct { + XMLName xml.Name `xml:"Delete"` + Quiet bool + Objects []deleteObject `xml:"Object"` +} + +// deletedMultiObjectsResult container for MultiObjects Delete XML response +type deleteMultiObjectsResult struct { + XMLName xml.Name `xml:"DeleteResult"` + DeletedObjects []deletedObject `xml:"Deleted"` + UnDeletedObjects []nonDeletedObject `xml:"Error"` } diff --git a/vendor/github.com/minio/minio-go/api-stat.go b/vendor/github.com/minio/minio-go/api-stat.go index 20f66e8fc..5b3dfe1b4 100644 --- a/vendor/github.com/minio/minio-go/api-stat.go +++ b/vendor/github.com/minio/minio-go/api-stat.go @@ -1,5 +1,5 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -21,33 +21,60 @@ import ( "strconv" "strings" "time" + + "github.com/minio/minio-go/pkg/s3utils" ) // BucketExists verify if bucket exists and you have permission to access it. -func (c Client) BucketExists(bucketName string) error { +func (c Client) BucketExists(bucketName string) (bool, error) { // Input validation. if err := isValidBucketName(bucketName); err != nil { - return err + return false, err } - // Instantiate a new request. - req, err := c.newRequest("HEAD", requestMetadata{ - bucketName: bucketName, + + // Execute HEAD on bucketName. + resp, err := c.executeMethod("HEAD", requestMetadata{ + bucketName: bucketName, + contentSHA256Bytes: emptySHA256, }) - if err != nil { - return err - } - // Initiate the request. 
- resp, err := c.do(req) defer closeResponse(resp) if err != nil { - return err + if ToErrorResponse(err).Code == "NoSuchBucket" { + return false, nil + } + return false, err } if resp != nil { if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp, bucketName, "") + return false, httpRespToErrorResponse(resp, bucketName, "") } } - return nil + return true, nil +} + +// List of header keys to be filtered, usually +// from all S3 API http responses. +var defaultFilterKeys = []string{ + "Transfer-Encoding", + "Accept-Ranges", + "Date", + "Server", + "Vary", + "x-amz-request-id", + "x-amz-id-2", + // Add new headers to be ignored. +} + +// Extract only necessary metadata header key/values by +// filtering them out with a list of custom header keys. +func extractObjMetadata(header http.Header) http.Header { + filterKeys := append([]string{ + "ETag", + "Content-Length", + "Last-Modified", + "Content-Type", + }, defaultFilterKeys...) + return filterHeader(header, filterKeys) } // StatObject verifies if object exists and you have permission to access. @@ -59,16 +86,32 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) { if err := isValidObjectName(objectName); err != nil { return ObjectInfo{}, err } - // Instantiate a new request. - req, err := c.newRequest("HEAD", requestMetadata{ - bucketName: bucketName, - objectName: objectName, - }) - if err != nil { + reqHeaders := NewHeadReqHeaders() + return c.statObject(bucketName, objectName, reqHeaders) +} + +// Lower level API for statObject supporting pre-conditions and range headers. +func (c Client) statObject(bucketName, objectName string, reqHeaders RequestHeaders) (ObjectInfo, error) { + // Input validation. + if err := isValidBucketName(bucketName); err != nil { + return ObjectInfo{}, err + } + if err := isValidObjectName(objectName); err != nil { return ObjectInfo{}, err } - // Initiate the request. 
- resp, err := c.do(req) + + customHeader := make(http.Header) + for k, v := range reqHeaders.Header { + customHeader[k] = v + } + + // Execute HEAD on objectName. + resp, err := c.executeMethod("HEAD", requestMetadata{ + bucketName: bucketName, + objectName: objectName, + contentSHA256Bytes: emptySHA256, + customHeader: customHeader, + }) defer closeResponse(resp) if err != nil { return ObjectInfo{}, err @@ -83,43 +126,56 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) { md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"") md5sum = strings.TrimSuffix(md5sum, "\"") - // Parse content length. - size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) - if err != nil { - return ObjectInfo{}, ErrorResponse{ - Code: "InternalError", - Message: "Content-Length is invalid. " + reportIssue, - BucketName: bucketName, - Key: objectName, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), + // Content-Length is not valid for Google Cloud Storage, do not verify. + var size int64 = -1 + if !s3utils.IsGoogleEndpoint(c.endpointURL) { + // Parse content length. + size, err = strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) + if err != nil { + return ObjectInfo{}, ErrorResponse{ + Code: "InternalError", + Message: "Content-Length is invalid. " + reportIssue, + BucketName: bucketName, + Key: objectName, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + Region: resp.Header.Get("x-amz-bucket-region"), + } } } + // Parse Last-Modified has http time format. date, err := time.Parse(http.TimeFormat, resp.Header.Get("Last-Modified")) if err != nil { return ObjectInfo{}, ErrorResponse{ - Code: "InternalError", - Message: "Last-Modified time format is invalid. 
" + reportIssue, - BucketName: bucketName, - Key: objectName, - RequestID: resp.Header.Get("x-amz-request-id"), - HostID: resp.Header.Get("x-amz-id-2"), - AmzBucketRegion: resp.Header.Get("x-amz-bucket-region"), + Code: "InternalError", + Message: "Last-Modified time format is invalid. " + reportIssue, + BucketName: bucketName, + Key: objectName, + RequestID: resp.Header.Get("x-amz-request-id"), + HostID: resp.Header.Get("x-amz-id-2"), + Region: resp.Header.Get("x-amz-bucket-region"), } } + // Fetch content type if any present. contentType := strings.TrimSpace(resp.Header.Get("Content-Type")) if contentType == "" { contentType = "application/octet-stream" } + + // Extract only the relevant header keys describing the object. + // following function filters out a list of standard set of keys + // which are not part of object metadata. + metadata := extractObjMetadata(resp.Header) + // Save object metadata info. - var objectStat ObjectInfo - objectStat.ETag = md5sum - objectStat.Key = objectName - objectStat.Size = size - objectStat.LastModified = date - objectStat.ContentType = contentType - return objectStat, nil + return ObjectInfo{ + ETag: md5sum, + Key: objectName, + Size: size, + LastModified: date, + ContentType: contentType, + Metadata: metadata, + }, nil } diff --git a/vendor/github.com/minio/minio-go/api.go b/vendor/github.com/minio/minio-go/api.go index 304044965..a563a18d4 100644 --- a/vendor/github.com/minio/minio-go/api.go +++ b/vendor/github.com/minio/minio-go/api.go @@ -1,5 +1,5 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -20,8 +20,11 @@ import ( "bytes" "encoding/base64" "encoding/hex" + "errors" "fmt" "io" + "io/ioutil" + "math/rand" "net/http" "net/http/httputil" "net/url" @@ -29,13 +32,20 @@ import ( "regexp" "runtime" "strings" + "sync" "time" + + "github.com/minio/minio-go/pkg/s3signer" + "github.com/minio/minio-go/pkg/s3utils" ) // Client implements Amazon S3 compatible methods. type Client struct { /// Standard options. + // Parsed endpoint url provided by the user. + endpointURL url.URL + // AccessKeyID required for authorized requests. accessKeyID string // SecretAccessKey required for authorized requests. @@ -50,21 +60,32 @@ type Client struct { appName string appVersion string } - endpointURL *url.URL + + // Indicate whether we are using https or not + secure bool // Needs allocation. httpClient *http.Client bucketLocCache *bucketLocationCache - // Advanced functionality + // Advanced functionality. isTraceEnabled bool traceOutput io.Writer + + // S3 specific accelerated endpoint. + s3AccelerateEndpoint string + + // Region endpoint + region string + + // Random seed. + random *rand.Rand } // Global constants. const ( libraryName = "minio-go" - libraryVersion = "1.0.0" + libraryVersion = "2.1.0" ) // User Agent should always following the below style. @@ -78,11 +99,12 @@ const ( // NewV2 - instantiate minio client with Amazon S3 signature version // '2' compatibility. -func NewV2(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (*Client, error) { - clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure) +func NewV2(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { + clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, secure) if err != nil { return nil, err } + // Set to use signature version '2'. 
clnt.signature = SignatureV2 return clnt, nil @@ -90,38 +112,85 @@ func NewV2(endpoint string, accessKeyID, secretAccessKey string, insecure bool) // NewV4 - instantiate minio client with Amazon S3 signature version // '4' compatibility. -func NewV4(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (*Client, error) { - clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure) +func NewV4(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { + clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, secure) if err != nil { return nil, err } + // Set to use signature version '4'. clnt.signature = SignatureV4 return clnt, nil } -// New - instantiate minio client Client, adds automatic verification -// of signature. -func New(endpoint string, accessKeyID, secretAccessKey string, insecure bool) (*Client, error) { - clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, insecure) +// New - instantiate minio client Client, adds automatic verification of signature. +func New(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { + return NewWithRegion(endpoint, accessKeyID, secretAccessKey, secure, "") +} + +// NewWithRegion - instantiate minio client, with region configured. Unlike New(), +// NewWithRegion avoids bucket-location lookup operations and it is slightly faster. +// Use this function when if your application deals with single region. +func NewWithRegion(endpoint, accessKeyID, secretAccessKey string, secure bool, region string) (*Client, error) { + clnt, err := privateNew(endpoint, accessKeyID, secretAccessKey, secure) if err != nil { return nil, err } - // Google cloud storage should be set to signature V2, force it if - // not. - if isGoogleEndpoint(clnt.endpointURL) { + + // Google cloud storage should be set to signature V2, force it if not. 
+ if s3utils.IsGoogleEndpoint(clnt.endpointURL) { clnt.signature = SignatureV2 } + // If Amazon S3 set to signature v2.n - if isAmazonEndpoint(clnt.endpointURL) { + if s3utils.IsAmazonEndpoint(clnt.endpointURL) { clnt.signature = SignatureV4 } + + // Sets custom region, if region is empty bucket location cache is used automatically. + clnt.region = region + + // Success.. return clnt, nil } -func privateNew(endpoint, accessKeyID, secretAccessKey string, insecure bool) (*Client, error) { +// lockedRandSource provides protected rand source, implements rand.Source interface. +type lockedRandSource struct { + lk sync.Mutex + src rand.Source +} + +// Int63 returns a non-negative pseudo-random 63-bit integer as an int64. +func (r *lockedRandSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +// Seed uses the provided seed value to initialize the generator to a +// deterministic state. +func (r *lockedRandSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} + +// redirectHeaders copies all headers when following a redirect URL. +// This won't be needed anymore from go 1.8 (https://github.com/golang/go/issues/4800) +func redirectHeaders(req *http.Request, via []*http.Request) error { + if len(via) == 0 { + return nil + } + for key, val := range via[0].Header { + req.Header[key] = val + } + return nil +} + +func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) { // construct endpoint. 
- endpointURL, err := getEndpointURL(endpoint, insecure) + endpointURL, err := getEndpointURL(endpoint, secure) if err != nil { return nil, err } @@ -130,32 +199,32 @@ func privateNew(endpoint, accessKeyID, secretAccessKey string, insecure bool) (* clnt := new(Client) clnt.accessKeyID = accessKeyID clnt.secretAccessKey = secretAccessKey - if clnt.accessKeyID == "" || clnt.secretAccessKey == "" { - clnt.anonymous = true - } + + // Remember whether we are using https or not + clnt.secure = secure // Save endpoint URL, user agent for future uses. - clnt.endpointURL = endpointURL + clnt.endpointURL = *endpointURL // Instantiate http client and bucket location cache. clnt.httpClient = &http.Client{ - // Setting a sensible time out of 30secs to wait for response - // headers. Request is pro-actively cancelled after 30secs - // with no response. - Timeout: 30 * time.Second, - Transport: http.DefaultTransport, + Transport: http.DefaultTransport, + CheckRedirect: redirectHeaders, } + // Instantiae bucket location cache. clnt.bucketLocCache = newBucketLocationCache() + // Introduce a new locked random seed. + clnt.random = rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())}) + // Return. return clnt, nil } // SetAppInfo - add application details to user agent. func (c *Client) SetAppInfo(appName string, appVersion string) { - // if app name and version is not set, we do not a new user - // agent. + // if app name and version not set, we do not set a new user agent. if appName != "" && appVersion != "" { c.appInfo = struct { appName string @@ -206,8 +275,18 @@ func (c *Client) TraceOff() { c.isTraceEnabled = false } -// requestMetadata - is container for all the values to make a -// request. +// SetS3TransferAccelerate - turns s3 accelerated endpoint on or off for all your +// requests. This feature is only specific to S3 for all other endpoints this +// function does nothing. 
To read further details on s3 transfer acceleration +// please vist - +// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html +func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) { + if s3utils.IsAmazonEndpoint(c.endpointURL) { + c.s3AccelerateEndpoint = accelerateEndpoint + } +} + +// requestMetadata - is container for all the values to make a request. type requestMetadata struct { // If set newRequest presigns the URL. presignURL bool @@ -221,16 +300,21 @@ type requestMetadata struct { // Generated by our internal code. bucketLocation string - contentBody io.ReadCloser + contentBody io.Reader contentLength int64 contentSHA256Bytes []byte contentMD5Bytes []byte } +// regCred matches credential string in HTTP header +var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/") + +// regCred matches signature string in HTTP header +var regSign = regexp.MustCompile("Signature=([[0-9a-f]+)") + // Filter out signature value from Authorization header. func (c Client) filterSignature(req *http.Request) { - // For anonymous requests, no need to filter. - if c.anonymous { + if _, ok := req.Header["Authorization"]; !ok { return } // Handle if Signature V2. @@ -246,11 +330,9 @@ func (c Client) filterSignature(req *http.Request) { origAuth := req.Header.Get("Authorization") // Strip out accessKeyID from: // Credential=////aws4_request - regCred := regexp.MustCompile("Credential=([A-Z0-9]+)/") newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/") // Strip out 256-bit signature from: Signature=<256-bit signature> - regSign := regexp.MustCompile("Signature=([[0-9a-f]+)") newAuth = regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**") // Set a temporary redacted auth @@ -299,7 +381,7 @@ func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error { // to zero. Keep this workaround until the above bug is fixed. 
if resp.ContentLength == 0 { var buffer bytes.Buffer - if err := resp.Header.Write(&buffer); err != nil { + if err = resp.Header.Write(&buffer); err != nil { return err } respTrace = buffer.Bytes() @@ -329,17 +411,43 @@ func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error { // do - execute http request. func (c Client) do(req *http.Request) (*http.Response, error) { - // execute the request. - resp, err := c.httpClient.Do(req) - if err != nil { - // Handle this specifically for now until future Golang - // versions fix this issue properly. - urlErr, ok := err.(*url.Error) - if ok && strings.Contains(urlErr.Err.Error(), "EOF") { - return nil, fmt.Errorf("Connection closed by foreign host %s. Retry again.", urlErr.URL) + var resp *http.Response + var err error + // Do the request in a loop in case of 307 http is met since golang still doesn't + // handle properly this situation (https://github.com/golang/go/issues/7912) + for { + resp, err = c.httpClient.Do(req) + if err != nil { + // Handle this specifically for now until future Golang + // versions fix this issue properly. + urlErr, ok := err.(*url.Error) + if ok && strings.Contains(urlErr.Err.Error(), "EOF") { + return nil, &url.Error{ + Op: urlErr.Op, + URL: urlErr.URL, + Err: errors.New("Connection closed by foreign host " + urlErr.URL + ". Retry again."), + } + } + return nil, err + } + // Redo the request with the new redirect url if http 307 is returned, quit the loop otherwise + if resp != nil && resp.StatusCode == http.StatusTemporaryRedirect { + newURL, err := url.Parse(resp.Header.Get("Location")) + if err != nil { + break + } + req.URL = newURL + } else { + break } - return resp, err } + + // Response cannot be non-nil, report if its the case. + if resp == nil { + msg := "Response is empty. " + reportIssue + return nil, ErrInvalidArgument(msg) + } + // If trace is enabled, dump http request and response. 
if c.isTraceEnabled { err = c.dumpHTTP(req, resp) @@ -350,6 +458,122 @@ func (c Client) do(req *http.Request) (*http.Response, error) { return resp, nil } +// List of success status. +var successStatus = []int{ + http.StatusOK, + http.StatusNoContent, + http.StatusPartialContent, +} + +// executeMethod - instantiates a given method, and retries the +// request upon any error up to maxRetries attempts in a binomially +// delayed manner using a standard back off algorithm. +func (c Client) executeMethod(method string, metadata requestMetadata) (res *http.Response, err error) { + var isRetryable bool // Indicates if request can be retried. + var bodySeeker io.Seeker // Extracted seeker from io.Reader. + if metadata.contentBody != nil { + // Check if body is seekable then it is retryable. + bodySeeker, isRetryable = metadata.contentBody.(io.Seeker) + switch bodySeeker { + case os.Stdin, os.Stdout, os.Stderr: + isRetryable = false + } + } + + // Create a done channel to control 'newRetryTimer' go routine. + doneCh := make(chan struct{}, 1) + + // Indicate to our routine to exit cleanly upon return. + defer close(doneCh) + + // Blank indentifier is kept here on purpose since 'range' without + // blank identifiers is only supported since go1.4 + // https://golang.org/doc/go1.4#forrange. + for _ = range c.newRetryTimer(MaxRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter, doneCh) { + // Retry executes the following function body if request has an + // error until maxRetries have been exhausted, retry attempts are + // performed after waiting for a given period of time in a + // binomial fashion. + if isRetryable { + // Seek back to beginning for each attempt. + if _, err = bodySeeker.Seek(0, 0); err != nil { + // If seek failed, no need to retry. + return nil, err + } + } + + // Instantiate a new request. 
+ var req *http.Request + req, err = c.newRequest(method, metadata) + if err != nil { + errResponse := ToErrorResponse(err) + if isS3CodeRetryable(errResponse.Code) { + continue // Retry. + } + return nil, err + } + + // Initiate the request. + res, err = c.do(req) + if err != nil { + // For supported network errors verify. + if isNetErrorRetryable(err) { + continue // Retry. + } + // For other errors, return here no need to retry. + return nil, err + } + + // For any known successful http status, return quickly. + for _, httpStatus := range successStatus { + if httpStatus == res.StatusCode { + return res, nil + } + } + + // Read the body to be saved later. + errBodyBytes, err := ioutil.ReadAll(res.Body) + // res.Body should be closed + closeResponse(res) + if err != nil { + return nil, err + } + + // Save the body. + errBodySeeker := bytes.NewReader(errBodyBytes) + res.Body = ioutil.NopCloser(errBodySeeker) + + // For errors verify if its retryable otherwise fail quickly. + errResponse := ToErrorResponse(httpRespToErrorResponse(res, metadata.bucketName, metadata.objectName)) + + // Save the body back again. + errBodySeeker.Seek(0, 0) // Seek back to starting point. + res.Body = ioutil.NopCloser(errBodySeeker) + + // Bucket region if set in error response and the error + // code dictates invalid region, we can retry the request + // with the new region. + if errResponse.Code == "InvalidRegion" && errResponse.Region != "" { + c.bucketLocCache.Set(metadata.bucketName, errResponse.Region) + continue // Retry. + } + + // Verify if error response code is retryable. + if isS3CodeRetryable(errResponse.Code) { + continue // Retry. + } + + // Verify if http status code is retryable. + if isHTTPStatusRetryable(res.StatusCode) { + continue // Retry. + } + + // For all other cases break out of the retry loop. + break + } + return res, err +} + // newRequest - instantiate a new HTTP request for a given method. 
func (c Client) newRequest(method string, metadata requestMetadata) (req *http.Request, err error) { // If no method is supplied default to 'POST'. @@ -357,8 +581,17 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R method = "POST" } + // Default all requests to "us-east-1" or "cn-north-1" (china region) + location := "us-east-1" + if s3utils.IsAmazonChinaEndpoint(c.endpointURL) { + // For china specifically we need to set everything to + // cn-north-1 for now, there is no easier way until AWS S3 + // provides a cleaner compatible API across "us-east-1" and + // China region. + location = "cn-north-1" + } + // Gather location only if bucketName is present. - location := "us-east-1" // Default all other requests to "us-east-1". if metadata.bucketName != "" { location, err = c.getBucketLocation(metadata.bucketName) if err != nil { @@ -376,32 +609,30 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R } // Initialize a new HTTP request for the method. - req, err = http.NewRequest(method, targetURL.String(), nil) + req, err = http.NewRequest(method, targetURL.String(), metadata.contentBody) if err != nil { return nil, err } + // Anonymous request. + anonymous := c.accessKeyID == "" || c.secretAccessKey == "" + // Generate presign url if needed, return right here. if metadata.expires != 0 && metadata.presignURL { - if c.anonymous { + if anonymous { return nil, ErrInvalidArgument("Requests cannot be presigned with anonymous credentials.") } if c.signature.isV2() { // Presign URL with signature v2. - req = preSignV2(*req, c.accessKeyID, c.secretAccessKey, metadata.expires) - } else { + req = s3signer.PreSignV2(*req, c.accessKeyID, c.secretAccessKey, metadata.expires) + } else if c.signature.isV4() { // Presign URL with signature v4. 
- req = preSignV4(*req, c.accessKeyID, c.secretAccessKey, location, metadata.expires) + req = s3signer.PreSignV4(*req, c.accessKeyID, c.secretAccessKey, location, metadata.expires) } return req, nil } - // Set content body if available. - if metadata.contentBody != nil { - req.Body = metadata.contentBody - } - - // set UserAgent for the request. + // Set 'User-Agent' header for the request. c.setUserAgent(req) // Set all headers. @@ -414,32 +645,32 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R req.ContentLength = metadata.contentLength } - // Set sha256 sum only for non anonymous credentials. - if !c.anonymous { - // set sha256 sum for signature calculation only with - // signature version '4'. - if c.signature.isV4() { - req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{}))) - if metadata.contentSHA256Bytes != nil { - req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(metadata.contentSHA256Bytes)) - } - } - } - // set md5Sum for content protection. if metadata.contentMD5Bytes != nil { - req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(metadata.contentMD5Bytes)) + req.Header.Set("Content-Md5", base64.StdEncoding.EncodeToString(metadata.contentMD5Bytes)) } - // Sign the request for all authenticated requests. - if !c.anonymous { - if c.signature.isV2() { - // Add signature version '2' authorization header. - req = signV2(*req, c.accessKeyID, c.secretAccessKey) - } else if c.signature.isV4() { - // Add signature version '4' authorization header. - req = signV4(*req, c.accessKeyID, c.secretAccessKey, location) + if anonymous { + return req, nil + } // Sign the request for all authenticated requests. + + switch { + case c.signature.isV2(): + // Add signature version '2' authorization header. 
+ req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey) + case c.signature.isStreamingV4() && method == "PUT": + req = s3signer.StreamingSignV4(req, c.accessKeyID, + c.secretAccessKey, location, metadata.contentLength, time.Now().UTC()) + default: + // Set sha256 sum for signature calculation only with signature version '4'. + shaHeader := unsignedPayload + if len(metadata.contentSHA256Bytes) > 0 { + shaHeader = hex.EncodeToString(metadata.contentSHA256Bytes) } + req.Header.Set("X-Amz-Content-Sha256", shaHeader) + + // Add signature version '4' authorization header. + req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, location) } // Return request. @@ -456,13 +687,25 @@ func (c Client) setUserAgent(req *http.Request) { // makeTargetURL make a new target url. func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, queryValues url.Values) (*url.URL, error) { - // Save host. host := c.endpointURL.Host // For Amazon S3 endpoint, try to fetch location based endpoint. - if isAmazonEndpoint(c.endpointURL) { - // Fetch new host based on the bucket location. - host = getS3Endpoint(bucketLocation) + if s3utils.IsAmazonEndpoint(c.endpointURL) { + if c.s3AccelerateEndpoint != "" && bucketName != "" { + // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html + // Disable transfer acceleration for non-compliant bucket names. + if strings.Contains(bucketName, ".") { + return nil, ErrTransferAccelerationBucket(bucketName) + } + // If transfer acceleration is requested set new host. + // For more details about enabling transfer acceleration read here. + // http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html + host = c.s3AccelerateEndpoint + } else { + // Fetch new host based on the bucket location. + host = getS3Endpoint(bucketLocation) + } } + // Save scheme. 
scheme := c.endpointURL.Scheme @@ -471,7 +714,7 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, que // endpoint URL. if bucketName != "" { // Save if target url will have buckets which suppport virtual host. - isVirtualHostStyle := isVirtualHostSupported(c.endpointURL, bucketName) + isVirtualHostStyle := s3utils.IsVirtualHostSupported(c.endpointURL, bucketName) // If endpoint supports virtual host style use that always. // Currently only S3 and Google Cloud Storage would support @@ -479,24 +722,23 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, que if isVirtualHostStyle { urlStr = scheme + "://" + bucketName + "." + host + "/" if objectName != "" { - urlStr = urlStr + urlEncodePath(objectName) + urlStr = urlStr + s3utils.EncodePath(objectName) } } else { // If not fall back to using path style. urlStr = urlStr + bucketName + "/" if objectName != "" { - urlStr = urlStr + urlEncodePath(objectName) + urlStr = urlStr + s3utils.EncodePath(objectName) } } } // If there are any query values, add them to the end. if len(queryValues) > 0 { - urlStr = urlStr + "?" + queryEncode(queryValues) + urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues) } u, err := url.Parse(urlStr) if err != nil { return nil, err } - return u, nil } diff --git a/vendor/github.com/minio/minio-go/appveyor.yml b/vendor/github.com/minio/minio-go/appveyor.yml index 5b8824d45..be746a7bf 100644 --- a/vendor/github.com/minio/minio-go/appveyor.yml +++ b/vendor/github.com/minio/minio-go/appveyor.yml @@ -17,15 +17,16 @@ install: - go version - go env - go get -u github.com/golang/lint/golint - - go get -u golang.org/x/tools/cmd/vet - go get -u github.com/remyoudompheng/go-misc/deadcode + - go get -u github.com/gordonklaus/ineffassign # to run your custom scripts instead of automatic MSBuild build_script: - go vet ./... - gofmt -s -l . - - golint github.com/minio/minio-go... + - golint -set_exit_status github.com/minio/minio-go... 
- deadcode + - ineffassign . - go test -short -v - go test -short -race -v diff --git a/vendor/github.com/minio/minio-go/bucket-acl.go b/vendor/github.com/minio/minio-go/bucket-acl.go deleted file mode 100644 index d8eda0f54..000000000 --- a/vendor/github.com/minio/minio-go/bucket-acl.go +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package minio - -// BucketACL - Bucket level access control. -type BucketACL string - -// Different types of ACL's currently supported for buckets. -const ( - bucketPrivate = BucketACL("private") - bucketReadOnly = BucketACL("public-read") - bucketPublic = BucketACL("public-read-write") - bucketAuthenticated = BucketACL("authenticated-read") -) - -// Stringify acl. -func (b BucketACL) String() string { - if string(b) == "" { - return "private" - } - return string(b) -} - -// isValidBucketACL - Is provided acl string supported. -func (b BucketACL) isValidBucketACL() bool { - switch true { - case b.isPrivate(): - fallthrough - case b.isReadOnly(): - fallthrough - case b.isPublic(): - fallthrough - case b.isAuthenticated(): - return true - case b.String() == "private": - // By default its "private" - return true - default: - return false - } -} - -// isPrivate - Is acl Private. -func (b BucketACL) isPrivate() bool { - return b == bucketPrivate -} - -// isPublicRead - Is acl PublicRead. 
-func (b BucketACL) isReadOnly() bool { - return b == bucketReadOnly -} - -// isPublicReadWrite - Is acl PublicReadWrite. -func (b BucketACL) isPublic() bool { - return b == bucketPublic -} - -// isAuthenticated - Is acl AuthenticatedRead. -func (b BucketACL) isAuthenticated() bool { - return b == bucketAuthenticated -} diff --git a/vendor/github.com/minio/minio-go/bucket-cache.go b/vendor/github.com/minio/minio-go/bucket-cache.go index c0b4f0cb8..28799c69d 100644 --- a/vendor/github.com/minio/minio-go/bucket-cache.go +++ b/vendor/github.com/minio/minio-go/bucket-cache.go @@ -20,11 +20,14 @@ import ( "encoding/hex" "net/http" "net/url" - "path/filepath" + "path" "sync" + + "github.com/minio/minio-go/pkg/s3signer" + "github.com/minio/minio-go/pkg/s3utils" ) -// bucketLocationCache - Provides simple mechansim to hold bucket +// bucketLocationCache - Provides simple mechanism to hold bucket // locations in memory. type bucketLocationCache struct { // mutex is used for handling the concurrent @@ -65,13 +68,35 @@ func (r *bucketLocationCache) Delete(bucketName string) { delete(r.items, bucketName) } -// getBucketLocation - Get location for the bucketName from location map cache. +// GetBucketLocation - get location for the bucket name from location cache, if not +// fetch freshly by making a new request. +func (c Client) GetBucketLocation(bucketName string) (string, error) { + if err := isValidBucketName(bucketName); err != nil { + return "", err + } + return c.getBucketLocation(bucketName) +} + +// getBucketLocation - Get location for the bucketName from location map cache, if not +// fetch freshly by making a new request. func (c Client) getBucketLocation(bucketName string) (string, error) { - // For anonymous requests, default to "us-east-1" and let other calls - // move forward. 
- if c.anonymous { - return "us-east-1", nil + if err := isValidBucketName(bucketName); err != nil { + return "", err + } + + if s3utils.IsAmazonChinaEndpoint(c.endpointURL) { + // For china specifically we need to set everything to + // cn-north-1 for now, there is no easier way until AWS S3 + // provides a cleaner compatible API across "us-east-1" and + // China region. + return "cn-north-1", nil + } + + // Region set then no need to fetch bucket location. + if c.region != "" { + return c.region, nil } + if location, ok := c.bucketLocCache.Get(bucketName); ok { return location, nil } @@ -88,9 +113,27 @@ func (c Client) getBucketLocation(bucketName string) (string, error) { if err != nil { return "", err } + location, err := processBucketLocationResponse(resp, bucketName) + if err != nil { + return "", err + } + c.bucketLocCache.Set(bucketName, location) + return location, nil +} + +// processes the getBucketLocation http response from the server. +func processBucketLocationResponse(resp *http.Response, bucketName string) (bucketLocation string, err error) { if resp != nil { if resp.StatusCode != http.StatusOK { - return "", httpRespToErrorResponse(resp, bucketName, "") + err = httpRespToErrorResponse(resp, bucketName, "") + errResp := ToErrorResponse(err) + // For access denied error, it could be an anonymous + // request. Move forward and let the top level callers + // succeed if possible based on their policy. + if errResp.Code == "AccessDenied" { + return "us-east-1", nil + } + return "", err } } @@ -113,7 +156,6 @@ func (c Client) getBucketLocation(bucketName string) (string, error) { } // Save the location into cache. - c.bucketLocCache.Set(bucketName, location) // Return. return location, nil @@ -127,7 +169,7 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro // Set get bucket location always as path style. 
targetURL := c.endpointURL - targetURL.Path = filepath.Join(bucketName, "") + "/" + targetURL.Path = path.Join(bucketName, "") + "/" targetURL.RawQuery = urlValues.Encode() // Get a new HTTP request for the method. @@ -141,14 +183,20 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro // Set sha256 sum for signature calculation only with signature version '4'. if c.signature.isV4() { - req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256([]byte{}))) + var contentSha256 string + if c.secure { + contentSha256 = unsignedPayload + } else { + contentSha256 = hex.EncodeToString(sum256([]byte{})) + } + req.Header.Set("X-Amz-Content-Sha256", contentSha256) } // Sign the request. if c.signature.isV4() { - req = signV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1") + req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1") } else if c.signature.isV2() { - req = signV2(*req, c.accessKeyID, c.secretAccessKey) + req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey) } return req, nil } diff --git a/vendor/github.com/minio/minio-go/bucket-notification.go b/vendor/github.com/minio/minio-go/bucket-notification.go new file mode 100644 index 000000000..5ac52e5f7 --- /dev/null +++ b/vendor/github.com/minio/minio-go/bucket-notification.go @@ -0,0 +1,231 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package minio + +import ( + "encoding/xml" + "reflect" +) + +// NotificationEventType is a S3 notification event associated to the bucket notification configuration +type NotificationEventType string + +// The role of all event types are described in : +// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations +const ( + ObjectCreatedAll NotificationEventType = "s3:ObjectCreated:*" + ObjectCreatedPut = "s3:ObjectCreated:Put" + ObjectCreatedPost = "s3:ObjectCreated:Post" + ObjectCreatedCopy = "s3:ObjectCreated:Copy" + ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload" + ObjectAccessedGet = "s3:ObjectAccessed:Get" + ObjectAccessedHead = "s3:ObjectAccessed:Head" + ObjectAccessedAll = "s3:ObjectAccessed:*" + ObjectRemovedAll = "s3:ObjectRemoved:*" + ObjectRemovedDelete = "s3:ObjectRemoved:Delete" + ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated" + ObjectReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject" +) + +// FilterRule - child of S3Key, a tag in the notification xml which +// carries suffix/prefix filters +type FilterRule struct { + Name string `xml:"Name"` + Value string `xml:"Value"` +} + +// S3Key - child of Filter, a tag in the notification xml which +// carries suffix/prefix filters +type S3Key struct { + FilterRules []FilterRule `xml:"FilterRule,omitempty"` +} + +// Filter - a tag in the notification xml structure which carries +// suffix/prefix filters +type Filter struct { + S3Key S3Key `xml:"S3Key,omitempty"` +} + +// Arn - holds ARN information that will be sent to the web service, +// ARN desciption can be found in http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html +type Arn struct { + Partition string + Service string + Region string + AccountID string + Resource string +} + +// NewArn creates new ARN based on the given partition, service, region, account id and resource +func 
NewArn(partition, service, region, accountID, resource string) Arn { + return Arn{Partition: partition, + Service: service, + Region: region, + AccountID: accountID, + Resource: resource} +} + +// Return the string format of the ARN +func (arn Arn) String() string { + return "arn:" + arn.Partition + ":" + arn.Service + ":" + arn.Region + ":" + arn.AccountID + ":" + arn.Resource +} + +// NotificationConfig - represents one single notification configuration +// such as topic, queue or lambda configuration. +type NotificationConfig struct { + ID string `xml:"Id,omitempty"` + Arn Arn `xml:"-"` + Events []NotificationEventType `xml:"Event"` + Filter *Filter `xml:"Filter,omitempty"` +} + +// NewNotificationConfig creates one notification config and sets the given ARN +func NewNotificationConfig(arn Arn) NotificationConfig { + return NotificationConfig{Arn: arn} +} + +// AddEvents adds one event to the current notification config +func (t *NotificationConfig) AddEvents(events ...NotificationEventType) { + t.Events = append(t.Events, events...) 
+} + +// AddFilterSuffix sets the suffix configuration to the current notification config +func (t *NotificationConfig) AddFilterSuffix(suffix string) { + if t.Filter == nil { + t.Filter = &Filter{} + } + newFilterRule := FilterRule{Name: "suffix", Value: suffix} + // Replace any suffix rule if existing and add to the list otherwise + for index := range t.Filter.S3Key.FilterRules { + if t.Filter.S3Key.FilterRules[index].Name == "suffix" { + t.Filter.S3Key.FilterRules[index] = newFilterRule + return + } + } + t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule) +} + +// AddFilterPrefix sets the prefix configuration to the current notification config +func (t *NotificationConfig) AddFilterPrefix(prefix string) { + if t.Filter == nil { + t.Filter = &Filter{} + } + newFilterRule := FilterRule{Name: "prefix", Value: prefix} + // Replace any prefix rule if existing and add to the list otherwise + for index := range t.Filter.S3Key.FilterRules { + if t.Filter.S3Key.FilterRules[index].Name == "prefix" { + t.Filter.S3Key.FilterRules[index] = newFilterRule + return + } + } + t.Filter.S3Key.FilterRules = append(t.Filter.S3Key.FilterRules, newFilterRule) +} + +// TopicConfig carries one single topic notification configuration +type TopicConfig struct { + NotificationConfig + Topic string `xml:"Topic"` +} + +// QueueConfig carries one single queue notification configuration +type QueueConfig struct { + NotificationConfig + Queue string `xml:"Queue"` +} + +// LambdaConfig carries one single cloudfunction notification configuration +type LambdaConfig struct { + NotificationConfig + Lambda string `xml:"CloudFunction"` +} + +// BucketNotification - the struct that represents the whole XML to be sent to the web service +type BucketNotification struct { + XMLName xml.Name `xml:"NotificationConfiguration"` + LambdaConfigs []LambdaConfig `xml:"CloudFunctionConfiguration"` + TopicConfigs []TopicConfig `xml:"TopicConfiguration"` + QueueConfigs []QueueConfig 
`xml:"QueueConfiguration"` +} + +// AddTopic adds a given topic config to the general bucket notification config +func (b *BucketNotification) AddTopic(topicConfig NotificationConfig) { + newTopicConfig := TopicConfig{NotificationConfig: topicConfig, Topic: topicConfig.Arn.String()} + for _, n := range b.TopicConfigs { + if reflect.DeepEqual(n, newTopicConfig) { + // Avoid adding duplicated entry + return + } + } + b.TopicConfigs = append(b.TopicConfigs, newTopicConfig) +} + +// AddQueue adds a given queue config to the general bucket notification config +func (b *BucketNotification) AddQueue(queueConfig NotificationConfig) { + newQueueConfig := QueueConfig{NotificationConfig: queueConfig, Queue: queueConfig.Arn.String()} + for _, n := range b.QueueConfigs { + if reflect.DeepEqual(n, newQueueConfig) { + // Avoid adding duplicated entry + return + } + } + b.QueueConfigs = append(b.QueueConfigs, newQueueConfig) +} + +// AddLambda adds a given lambda config to the general bucket notification config +func (b *BucketNotification) AddLambda(lambdaConfig NotificationConfig) { + newLambdaConfig := LambdaConfig{NotificationConfig: lambdaConfig, Lambda: lambdaConfig.Arn.String()} + for _, n := range b.LambdaConfigs { + if reflect.DeepEqual(n, newLambdaConfig) { + // Avoid adding duplicated entry + return + } + } + b.LambdaConfigs = append(b.LambdaConfigs, newLambdaConfig) +} + +// RemoveTopicByArn removes all topic configurations that match the exact specified ARN +func (b *BucketNotification) RemoveTopicByArn(arn Arn) { + var topics []TopicConfig + for _, topic := range b.TopicConfigs { + if topic.Topic != arn.String() { + topics = append(topics, topic) + } + } + b.TopicConfigs = topics +} + +// RemoveQueueByArn removes all queue configurations that match the exact specified ARN +func (b *BucketNotification) RemoveQueueByArn(arn Arn) { + var queues []QueueConfig + for _, queue := range b.QueueConfigs { + if queue.Queue != arn.String() { + queues = append(queues, queue) + } 
+ } + b.QueueConfigs = queues +} + +// RemoveLambdaByArn removes all lambda configurations that match the exact specified ARN +func (b *BucketNotification) RemoveLambdaByArn(arn Arn) { + var lambdas []LambdaConfig + for _, lambda := range b.LambdaConfigs { + if lambda.Lambda != arn.String() { + lambdas = append(lambdas, lambda) + } + } + b.LambdaConfigs = lambdas +} diff --git a/vendor/github.com/minio/minio-go/constants.go b/vendor/github.com/minio/minio-go/constants.go index b0aa009d8..6055bfdad 100644 --- a/vendor/github.com/minio/minio-go/constants.go +++ b/vendor/github.com/minio/minio-go/constants.go @@ -18,9 +18,9 @@ package minio /// Multipart upload defaults. -// miniPartSize - minimum part size 5MiB per object after which +// miniPartSize - minimum part size 64MiB per object after which // putObject behaves internally as multipart. -const minPartSize = 1024 * 1024 * 5 +const minPartSize = 1024 * 1024 * 64 // maxPartsCount - maximum number of parts for a single multipart session. const maxPartsCount = 10000 @@ -40,3 +40,23 @@ const maxMultipartPutObjectSize = 1024 * 1024 * 1024 * 1024 * 5 // optimalReadBufferSize - optimal buffer 5MiB used for reading // through Read operation. const optimalReadBufferSize = 1024 * 1024 * 5 + +// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when +// we don't want to sign the request payload +const unsignedPayload = "UNSIGNED-PAYLOAD" + +// Total number of parallel workers used for multipart operation. +var totalWorkers = 3 + +// Signature related constants. +const ( + signV4Algorithm = "AWS4-HMAC-SHA256" + iso8601DateFormat = "20060102T150405Z" +) + +// Encryption headers stored along with the object. 
+const ( + amzHeaderIV = "X-Amz-Meta-X-Amz-Iv" + amzHeaderKey = "X-Amz-Meta-X-Amz-Key" + amzHeaderMatDesc = "X-Amz-Meta-X-Amz-Matdesc" +) diff --git a/vendor/github.com/minio/minio-go/copy-conditions.go b/vendor/github.com/minio/minio-go/copy-conditions.go new file mode 100644 index 000000000..65018aa09 --- /dev/null +++ b/vendor/github.com/minio/minio-go/copy-conditions.go @@ -0,0 +1,99 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "net/http" + "time" +) + +// copyCondition explanation: +// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectCOPY.html +// +// Example: +// +// copyCondition { +// key: "x-amz-copy-if-modified-since", +// value: "Tue, 15 Nov 1994 12:45:26 GMT", +// } +// +type copyCondition struct { + key string + value string +} + +// CopyConditions - copy conditions. +type CopyConditions struct { + conditions []copyCondition +} + +// NewCopyConditions - Instantiate new list of conditions. This +// function is left behind for backward compatibility. The idiomatic +// way to set an empty set of copy conditions is, +// ``copyConditions := CopyConditions{}``. +// +func NewCopyConditions() CopyConditions { + return CopyConditions{} +} + +// SetMatchETag - set match etag. 
+func (c *CopyConditions) SetMatchETag(etag string) error { + if etag == "" { + return ErrInvalidArgument("ETag cannot be empty.") + } + c.conditions = append(c.conditions, copyCondition{ + key: "x-amz-copy-source-if-match", + value: etag, + }) + return nil +} + +// SetMatchETagExcept - set match etag except. +func (c *CopyConditions) SetMatchETagExcept(etag string) error { + if etag == "" { + return ErrInvalidArgument("ETag cannot be empty.") + } + c.conditions = append(c.conditions, copyCondition{ + key: "x-amz-copy-source-if-none-match", + value: etag, + }) + return nil +} + +// SetUnmodified - set unmodified time since. +func (c *CopyConditions) SetUnmodified(modTime time.Time) error { + if modTime.IsZero() { + return ErrInvalidArgument("Modified since cannot be empty.") + } + c.conditions = append(c.conditions, copyCondition{ + key: "x-amz-copy-source-if-unmodified-since", + value: modTime.Format(http.TimeFormat), + }) + return nil +} + +// SetModified - set modified time since. +func (c *CopyConditions) SetModified(modTime time.Time) error { + if modTime.IsZero() { + return ErrInvalidArgument("Modified since cannot be empty.") + } + c.conditions = append(c.conditions, copyCondition{ + key: "x-amz-copy-source-if-modified-since", + value: modTime.Format(http.TimeFormat), + }) + return nil +} diff --git a/vendor/github.com/minio/minio-go/core.go b/vendor/github.com/minio/minio-go/core.go new file mode 100644 index 000000000..be9388cec --- /dev/null +++ b/vendor/github.com/minio/minio-go/core.go @@ -0,0 +1,113 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "io" + + "github.com/minio/minio-go/pkg/policy" +) + +// Core - Inherits Client and adds new methods to expose the low level S3 APIs. +type Core struct { + *Client +} + +// NewCore - Returns new initialized a Core client, this CoreClient should be +// only used under special conditions such as need to access lower primitives +// and being able to use them to write your own wrappers. +func NewCore(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Core, error) { + var s3Client Core + client, err := NewV4(endpoint, accessKeyID, secretAccessKey, secure) + if err != nil { + return nil, err + } + s3Client.Client = client + return &s3Client, nil +} + +// ListObjects - List all the objects at a prefix, optionally with marker and delimiter +// you can further filter the results. +func (c Core) ListObjects(bucket, prefix, marker, delimiter string, maxKeys int) (result ListBucketResult, err error) { + return c.listObjectsQuery(bucket, prefix, marker, delimiter, maxKeys) +} + +// ListObjectsV2 - Lists all the objects at a prefix, similar to ListObjects() but uses +// continuationToken instead of marker to further filter the results. +func (c Core) ListObjectsV2(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int) (ListBucketV2Result, error) { + return c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, maxkeys) +} + +// PutObject - Upload object. Uploads using single PUT call. 
+func (c Core) PutObject(bucket, object string, size int64, data io.Reader, md5Sum, sha256Sum []byte, metadata map[string][]string) (ObjectInfo, error) {
+	return c.putObjectDo(bucket, object, data, md5Sum, sha256Sum, size, metadata)
+}
+
+// NewMultipartUpload - Initiates new multipart upload and returns the new uploadID.
+func (c Core) NewMultipartUpload(bucket, object string, metadata map[string][]string) (uploadID string, err error) {
+	result, err := c.initiateMultipartUpload(bucket, object, metadata)
+	return result.UploadID, err
+}
+
+// ListMultipartUploads - List incomplete uploads.
+func (c Core) ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartUploadsResult, err error) {
+	return c.listMultipartUploadsQuery(bucket, keyMarker, uploadIDMarker, prefix, delimiter, maxUploads)
+}
+
+// PutObjectPart - Upload an object part.
+func (c Core) PutObjectPart(bucket, object, uploadID string, partID int, size int64, data io.Reader, md5Sum, sha256Sum []byte) (ObjectPart, error) {
+	return c.uploadPart(bucket, object, uploadID, data, partID, md5Sum, sha256Sum, size)
+}
+
+// ListObjectParts - List uploaded parts of an incomplete upload.
+func (c Core) ListObjectParts(bucket, object, uploadID string, partNumberMarker int, maxParts int) (result ListObjectPartsResult, err error) {
+	return c.listObjectPartsQuery(bucket, object, uploadID, partNumberMarker, maxParts)
+}
+
+// CompleteMultipartUpload - Concatenate uploaded parts and commit to an object.
+func (c Core) CompleteMultipartUpload(bucket, object, uploadID string, parts []CompletePart) error {
+	_, err := c.completeMultipartUpload(bucket, object, uploadID, completeMultipartUpload{Parts: parts})
+	return err
+}
+
+// AbortMultipartUpload - Abort an incomplete upload.
+func (c Core) AbortMultipartUpload(bucket, object, uploadID string) error { + return c.abortMultipartUpload(bucket, object, uploadID) +} + +// GetBucketPolicy - fetches bucket access policy for a given bucket. +func (c Core) GetBucketPolicy(bucket string) (policy.BucketAccessPolicy, error) { + return c.getBucketPolicy(bucket) +} + +// PutBucketPolicy - applies a new bucket access policy for a given bucket. +func (c Core) PutBucketPolicy(bucket string, bucketPolicy policy.BucketAccessPolicy) error { + return c.putBucketPolicy(bucket, bucketPolicy) +} + +// GetObject is a lower level API implemented to support reading +// partial objects and also downloading objects with special conditions +// matching etag, modtime etc. +func (c Core) GetObject(bucketName, objectName string, reqHeaders RequestHeaders) (io.ReadCloser, ObjectInfo, error) { + return c.getObject(bucketName, objectName, reqHeaders) +} + +// StatObject is a lower level API implemented to support special +// conditions matching etag, modtime on a request. +func (c Core) StatObject(bucketName, objectName string, reqHeaders RequestHeaders) (ObjectInfo, error) { + return c.statObject(bucketName, objectName, reqHeaders) +} diff --git a/vendor/github.com/minio/minio-go/hook-reader.go b/vendor/github.com/minio/minio-go/hook-reader.go index 043425f23..bc9ece049 100644 --- a/vendor/github.com/minio/minio-go/hook-reader.go +++ b/vendor/github.com/minio/minio-go/hook-reader.go @@ -1,5 +1,5 @@ /* - * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -27,6 +27,22 @@ type hookReader struct { hook io.Reader } +// Seek implements io.Seeker. Seeks source first, and if necessary +// seeks hook if Seek method is appropriately found. 
+func (hr *hookReader) Seek(offset int64, whence int) (n int64, err error) { + // Verify for source has embedded Seeker, use it. + sourceSeeker, ok := hr.source.(io.Seeker) + if ok { + return sourceSeeker.Seek(offset, whence) + } + // Verify if hook has embedded Seeker, use it. + hookSeeker, ok := hr.hook.(io.Seeker) + if ok { + return hookSeeker.Seek(offset, whence) + } + return n, nil +} + // Read implements io.Reader. Always reads from the source, the return // value 'n' number of bytes are reported through the hook. Returns // error for all non io.EOF conditions. @@ -44,7 +60,7 @@ func (hr *hookReader) Read(b []byte) (n int, err error) { return n, err } -// newHook returns a io.Reader which implements hookReader that +// newHook returns a io.ReadSeeker which implements hookReader that // reports the data read from the source to the hook. func newHook(source, hook io.Reader) io.Reader { if hook == nil { diff --git a/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go b/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go new file mode 100644 index 000000000..7670e68f4 --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/encrypt/cbc.go @@ -0,0 +1,284 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package encrypt + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "encoding/base64" + "errors" + "io" +) + +// Crypt mode - encryption or decryption +type cryptMode int + +const ( + encryptMode cryptMode = iota + decryptMode +) + +// CBCSecureMaterials encrypts/decrypts data using AES CBC algorithm +type CBCSecureMaterials struct { + + // Data stream to encrypt/decrypt + stream io.Reader + + // Last internal error + err error + + // End of file reached + eof bool + + // Holds initial data + srcBuf *bytes.Buffer + + // Holds transformed data (encrypted or decrypted) + dstBuf *bytes.Buffer + + // Encryption algorithm + encryptionKey Key + + // Key to encrypts/decrypts data + contentKey []byte + + // Encrypted form of contentKey + cryptedKey []byte + + // Initialization vector + iv []byte + + // matDesc - currently unused + matDesc []byte + + // Indicate if we are going to encrypt or decrypt + cryptMode cryptMode + + // Helper that encrypts/decrypts data + blockMode cipher.BlockMode +} + +// NewCBCSecureMaterials builds new CBC crypter module with +// the specified encryption key (symmetric or asymmetric) +func NewCBCSecureMaterials(key Key) (*CBCSecureMaterials, error) { + if key == nil { + return nil, errors.New("Unable to recognize empty encryption properties") + } + return &CBCSecureMaterials{ + srcBuf: bytes.NewBuffer([]byte{}), + dstBuf: bytes.NewBuffer([]byte{}), + encryptionKey: key, + matDesc: []byte("{}"), + }, nil + +} + +// SetupEncryptMode - tells CBC that we are going to encrypt data +func (s *CBCSecureMaterials) SetupEncryptMode(stream io.Reader) error { + // Set mode to encrypt + s.cryptMode = encryptMode + + // Set underlying reader + s.stream = stream + + s.eof = false + s.srcBuf.Reset() + s.dstBuf.Reset() + + var err error + + // Generate random content key + s.contentKey = make([]byte, aes.BlockSize*2) + if _, err := rand.Read(s.contentKey); err != nil { + return err + } + // Encrypt content key + s.cryptedKey, err = 
s.encryptionKey.Encrypt(s.contentKey) + if err != nil { + return err + } + // Generate random IV + s.iv = make([]byte, aes.BlockSize) + if _, err = rand.Read(s.iv); err != nil { + return err + } + // New cipher + encryptContentBlock, err := aes.NewCipher(s.contentKey) + if err != nil { + return err + } + + s.blockMode = cipher.NewCBCEncrypter(encryptContentBlock, s.iv) + + return nil +} + +// SetupDecryptMode - tells CBC that we are going to decrypt data +func (s *CBCSecureMaterials) SetupDecryptMode(stream io.Reader, iv string, key string) error { + // Set mode to decrypt + s.cryptMode = decryptMode + + // Set underlying reader + s.stream = stream + + // Reset + s.eof = false + s.srcBuf.Reset() + s.dstBuf.Reset() + + var err error + + // Get IV + s.iv, err = base64.StdEncoding.DecodeString(iv) + if err != nil { + return err + } + + // Get encrypted content key + s.cryptedKey, err = base64.StdEncoding.DecodeString(key) + if err != nil { + return err + } + + // Decrypt content key + s.contentKey, err = s.encryptionKey.Decrypt(s.cryptedKey) + if err != nil { + return err + } + + // New cipher + decryptContentBlock, err := aes.NewCipher(s.contentKey) + if err != nil { + return err + } + + s.blockMode = cipher.NewCBCDecrypter(decryptContentBlock, s.iv) + return nil +} + +// GetIV - return randomly generated IV (per S3 object), base64 encoded. +func (s *CBCSecureMaterials) GetIV() string { + return base64.StdEncoding.EncodeToString(s.iv) +} + +// GetKey - return content encrypting key (cek) in encrypted form, base64 encoded. +func (s *CBCSecureMaterials) GetKey() string { + return base64.StdEncoding.EncodeToString(s.cryptedKey) +} + +// GetDesc - user provided encryption material description in JSON (UTF8) format. 
+func (s *CBCSecureMaterials) GetDesc() string { + return string(s.matDesc) +} + +// Fill buf with encrypted/decrypted data +func (s *CBCSecureMaterials) Read(buf []byte) (n int, err error) { + // Always fill buf from bufChunk at the end of this function + defer func() { + if s.err != nil { + n, err = 0, s.err + } else { + n, err = s.dstBuf.Read(buf) + } + }() + + // Return + if s.eof { + return + } + + // Fill dest buffer if its length is less than buf + for !s.eof && s.dstBuf.Len() < len(buf) { + + srcPart := make([]byte, aes.BlockSize) + dstPart := make([]byte, aes.BlockSize) + + // Fill src buffer + for s.srcBuf.Len() < aes.BlockSize*2 { + _, err = io.CopyN(s.srcBuf, s.stream, aes.BlockSize) + if err != nil { + break + } + } + + // Quit immediately for errors other than io.EOF + if err != nil && err != io.EOF { + s.err = err + return + } + + // Mark current encrypting/decrypting as finished + s.eof = (err == io.EOF) + + if s.eof && s.cryptMode == encryptMode { + if srcPart, err = pkcs5Pad(s.srcBuf.Bytes(), aes.BlockSize); err != nil { + s.err = err + return + } + } else { + _, _ = s.srcBuf.Read(srcPart) + } + + // Crypt srcPart content + for len(srcPart) > 0 { + + // Crypt current part + s.blockMode.CryptBlocks(dstPart, srcPart[:aes.BlockSize]) + + // Unpad when this is the last part and we are decrypting + if s.eof && s.cryptMode == decryptMode { + dstPart, err = pkcs5Unpad(dstPart, aes.BlockSize) + if err != nil { + s.err = err + return + } + } + + // Send crypted data to dstBuf + if _, wErr := s.dstBuf.Write(dstPart); wErr != nil { + s.err = wErr + return + } + // Move to the next part + srcPart = srcPart[aes.BlockSize:] + } + } + return +} + +// Unpad a set of bytes following PKCS5 algorithm +func pkcs5Unpad(buf []byte, blockSize int) ([]byte, error) { + len := len(buf) + if len == 0 { + return nil, errors.New("buffer is empty") + } + pad := int(buf[len-1]) + if pad > len || pad > blockSize { + return nil, errors.New("invalid padding size") + } + return 
buf[:len-pad], nil +} + +// Pad a set of bytes following PKCS5 algorithm +func pkcs5Pad(buf []byte, blockSize int) ([]byte, error) { + len := len(buf) + pad := blockSize - (len % blockSize) + padText := bytes.Repeat([]byte{byte(pad)}, pad) + return append(buf, padText...), nil +} diff --git a/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go b/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go new file mode 100644 index 000000000..2fd75033f --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/encrypt/interface.go @@ -0,0 +1,50 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package encrypt implements a generic interface to encrypt any stream of data. +// currently this package implements two types of encryption +// - Symmetric encryption using AES. +// - Asymmetric encryption using RSA. +package encrypt + +import "io" + +// Materials - provides generic interface to encrypt any stream of data. +type Materials interface { + + // Returns encrypted/decrypted data, io.Reader compatible. +	Read(b []byte) (int, error) + + // Get randomly generated IV, base64 encoded. + GetIV() (iv string) + + // Get content encrypting key (cek) in encrypted form, base64 encoded. + GetKey() (key string) + + // Get user provided encryption material description in + // JSON (UTF8) format. This is not used, kept for future. 
+ GetDesc() (desc string) + + // Setup encrypt mode, further calls of Read() function + // will return the encrypted form of data streamed + // by the passed reader + SetupEncryptMode(stream io.Reader) error + + // Setup decrypted mode, further calls of Read() function + // will return the decrypted form of data streamed + // by the passed reader + SetupDecryptMode(stream io.Reader, iv string, key string) error +} diff --git a/vendor/github.com/minio/minio-go/pkg/encrypt/keys.go b/vendor/github.com/minio/minio-go/pkg/encrypt/keys.go new file mode 100644 index 000000000..8814845e3 --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/encrypt/keys.go @@ -0,0 +1,165 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package encrypt + +import ( + "crypto/aes" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "errors" +) + +// Key - generic interface to encrypt/decrypt a key. +// We use it to encrypt/decrypt content key which is the key +// that encrypt/decrypt object data. 
+type Key interface { + // Encrypt data using to the set encryption key + Encrypt([]byte) ([]byte, error) + // Decrypt data using to the set encryption key + Decrypt([]byte) ([]byte, error) +} + +// SymmetricKey - encrypts data with a symmetric master key +type SymmetricKey struct { + masterKey []byte +} + +// Encrypt passed bytes +func (s *SymmetricKey) Encrypt(plain []byte) ([]byte, error) { + // Initialize an AES encryptor using a master key + keyBlock, err := aes.NewCipher(s.masterKey) + if err != nil { + return []byte{}, err + } + + // Pad the key before encryption + plain, _ = pkcs5Pad(plain, aes.BlockSize) + + encKey := []byte{} + encPart := make([]byte, aes.BlockSize) + + // Encrypt the passed key by block + for { + if len(plain) < aes.BlockSize { + break + } + // Encrypt the passed key + keyBlock.Encrypt(encPart, plain[:aes.BlockSize]) + // Add the encrypted block to the total encrypted key + encKey = append(encKey, encPart...) + // Pass to the next plain block + plain = plain[aes.BlockSize:] + } + return encKey, nil +} + +// Decrypt passed bytes +func (s *SymmetricKey) Decrypt(cipher []byte) ([]byte, error) { + // Initialize AES decrypter + keyBlock, err := aes.NewCipher(s.masterKey) + if err != nil { + return nil, err + } + + var plain []byte + plainPart := make([]byte, aes.BlockSize) + + // Decrypt the encrypted data block by block + for { + if len(cipher) < aes.BlockSize { + break + } + keyBlock.Decrypt(plainPart, cipher[:aes.BlockSize]) + // Add the decrypted block to the total result + plain = append(plain, plainPart...) 
+ // Pass to the next cipher block + cipher = cipher[aes.BlockSize:] + } + + // Unpad the resulted plain data + plain, err = pkcs5Unpad(plain, aes.BlockSize) + if err != nil { + return nil, err + } + + return plain, nil +} + +// NewSymmetricKey generates a new encrypt/decrypt crypto using +// an AES master key password +func NewSymmetricKey(b []byte) *SymmetricKey { + return &SymmetricKey{masterKey: b} +} + +// AsymmetricKey - struct which encrypts/decrypts data +// using RSA public/private certificates +type AsymmetricKey struct { + publicKey *rsa.PublicKey + privateKey *rsa.PrivateKey +} + +// Encrypt data using public key +func (a *AsymmetricKey) Encrypt(plain []byte) ([]byte, error) { + cipher, err := rsa.EncryptPKCS1v15(rand.Reader, a.publicKey, plain) + if err != nil { + return nil, err + } + return cipher, nil +} + +// Decrypt data using private key +func (a *AsymmetricKey) Decrypt(cipher []byte) ([]byte, error) { + cipher, err := rsa.DecryptPKCS1v15(rand.Reader, a.privateKey, cipher) + if err != nil { + return nil, err + } + return cipher, nil +} + +// NewAsymmetricKey - generates a crypto module able to encrypt/decrypt +// data using a pair of private and public keys +func NewAsymmetricKey(privData []byte, pubData []byte) (*AsymmetricKey, error) { + // Parse private key from passed data + priv, err := x509.ParsePKCS8PrivateKey(privData) + if err != nil { + return nil, err + } + privKey, ok := priv.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("not a valid private key") + } + + // Parse public key from passed data + pub, err := x509.ParsePKIXPublicKey(pubData) + if err != nil { + return nil, err + } + + pubKey, ok := pub.(*rsa.PublicKey) + if !ok { + return nil, errors.New("not a valid public key") + } + + // Associate the private key with the passed public key + privKey.PublicKey = *pubKey + + return &AsymmetricKey{ + publicKey: pubKey, + privateKey: privKey, + }, nil +} diff --git 
a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition.go b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition.go new file mode 100644 index 000000000..078bcd1db --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy-condition.go @@ -0,0 +1,115 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package policy + +import "github.com/minio/minio-go/pkg/set" + +// ConditionKeyMap - map of policy condition key and value. +type ConditionKeyMap map[string]set.StringSet + +// Add - adds key and value. The value is appended If key already exists. +func (ckm ConditionKeyMap) Add(key string, value set.StringSet) { + if v, ok := ckm[key]; ok { + ckm[key] = v.Union(value) + } else { + ckm[key] = set.CopyStringSet(value) + } +} + +// Remove - removes value of given key. If key has empty after removal, the key is also removed. +func (ckm ConditionKeyMap) Remove(key string, value set.StringSet) { + if v, ok := ckm[key]; ok { + if value != nil { + ckm[key] = v.Difference(value) + } + + if ckm[key].IsEmpty() { + delete(ckm, key) + } + } +} + +// RemoveKey - removes key and its value. +func (ckm ConditionKeyMap) RemoveKey(key string) { + if _, ok := ckm[key]; ok { + delete(ckm, key) + } +} + +// CopyConditionKeyMap - returns new copy of given ConditionKeyMap. 
+func CopyConditionKeyMap(condKeyMap ConditionKeyMap) ConditionKeyMap { + out := make(ConditionKeyMap) + + for k, v := range condKeyMap { + out[k] = set.CopyStringSet(v) + } + + return out +} + +// mergeConditionKeyMap - returns a new ConditionKeyMap which contains merged key/value of given two ConditionKeyMap. +func mergeConditionKeyMap(condKeyMap1 ConditionKeyMap, condKeyMap2 ConditionKeyMap) ConditionKeyMap { + out := CopyConditionKeyMap(condKeyMap1) + + for k, v := range condKeyMap2 { + if ev, ok := out[k]; ok { + out[k] = ev.Union(v) + } else { + out[k] = set.CopyStringSet(v) + } + } + + return out +} + +// ConditionMap - map of condition and conditional values. +type ConditionMap map[string]ConditionKeyMap + +// Add - adds condition key and condition value. The value is appended if key already exists. +func (cond ConditionMap) Add(condKey string, condKeyMap ConditionKeyMap) { + if v, ok := cond[condKey]; ok { + cond[condKey] = mergeConditionKeyMap(v, condKeyMap) + } else { + cond[condKey] = CopyConditionKeyMap(condKeyMap) + } +} + +// Remove - removes condition key and its value. +func (cond ConditionMap) Remove(condKey string) { + if _, ok := cond[condKey]; ok { + delete(cond, condKey) + } +} + +// mergeConditionMap - returns new ConditionMap which contains merged key/value of two ConditionMap. 
+func mergeConditionMap(condMap1 ConditionMap, condMap2 ConditionMap) ConditionMap { + out := make(ConditionMap) + + for k, v := range condMap1 { + out[k] = CopyConditionKeyMap(v) + } + + for k, v := range condMap2 { + if ev, ok := out[k]; ok { + out[k] = mergeConditionKeyMap(ev, v) + } else { + out[k] = CopyConditionKeyMap(v) + } + } + + return out +} diff --git a/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go new file mode 100644 index 000000000..b2d46e178 --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/policy/bucket-policy.go @@ -0,0 +1,634 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package policy + +import ( + "reflect" + "strings" + + "github.com/minio/minio-go/pkg/set" +) + +// BucketPolicy - Bucket level policy. +type BucketPolicy string + +// Different types of Policies currently supported for buckets. +const ( + BucketPolicyNone BucketPolicy = "none" + BucketPolicyReadOnly = "readonly" + BucketPolicyReadWrite = "readwrite" + BucketPolicyWriteOnly = "writeonly" +) + +// IsValidBucketPolicy - returns true if policy is valid and supported, false otherwise. 
+func (p BucketPolicy) IsValidBucketPolicy() bool { + switch p { + case BucketPolicyNone, BucketPolicyReadOnly, BucketPolicyReadWrite, BucketPolicyWriteOnly: + return true + } + return false +} + +// Resource prefix for all aws resources. +const awsResourcePrefix = "arn:aws:s3:::" + +// Common bucket actions for both read and write policies. +var commonBucketActions = set.CreateStringSet("s3:GetBucketLocation") + +// Read only bucket actions. +var readOnlyBucketActions = set.CreateStringSet("s3:ListBucket") + +// Write only bucket actions. +var writeOnlyBucketActions = set.CreateStringSet("s3:ListBucketMultipartUploads") + +// Read only object actions. +var readOnlyObjectActions = set.CreateStringSet("s3:GetObject") + +// Write only object actions. +var writeOnlyObjectActions = set.CreateStringSet("s3:AbortMultipartUpload", "s3:DeleteObject", "s3:ListMultipartUploadParts", "s3:PutObject") + +// Read and write object actions. +var readWriteObjectActions = readOnlyObjectActions.Union(writeOnlyObjectActions) + +// All valid bucket and object actions. +var validActions = commonBucketActions. + Union(readOnlyBucketActions). + Union(writeOnlyBucketActions). + Union(readOnlyObjectActions). + Union(writeOnlyObjectActions) + +var startsWithFunc = func(resource string, resourcePrefix string) bool { + return strings.HasPrefix(resource, resourcePrefix) +} + +// User - canonical users list. 
+type User struct { + AWS set.StringSet `json:"AWS,omitempty"` + CanonicalUser set.StringSet `json:"CanonicalUser,omitempty"` +} + +// Statement - minio policy statement +type Statement struct { + Actions set.StringSet `json:"Action"` + Conditions ConditionMap `json:"Condition,omitempty"` + Effect string + Principal User `json:"Principal"` + Resources set.StringSet `json:"Resource"` + Sid string +} + +// BucketAccessPolicy - minio policy collection +type BucketAccessPolicy struct { + Version string // date in YYYY-MM-DD format + Statements []Statement `json:"Statement"` +} + +// isValidStatement - returns whether given statement is valid to process for given bucket name. +func isValidStatement(statement Statement, bucketName string) bool { + if statement.Actions.Intersection(validActions).IsEmpty() { + return false + } + + if statement.Effect != "Allow" { + return false + } + + if statement.Principal.AWS == nil || !statement.Principal.AWS.Contains("*") { + return false + } + + bucketResource := awsResourcePrefix + bucketName + if statement.Resources.Contains(bucketResource) { + return true + } + + if statement.Resources.FuncMatch(startsWithFunc, bucketResource+"/").IsEmpty() { + return false + } + + return true +} + +// Returns new statements with bucket actions for given policy. 
+func newBucketStatement(policy BucketPolicy, bucketName string, prefix string) (statements []Statement) { + statements = []Statement{} + if policy == BucketPolicyNone || bucketName == "" { + return statements + } + + bucketResource := set.CreateStringSet(awsResourcePrefix + bucketName) + + statement := Statement{ + Actions: commonBucketActions, + Effect: "Allow", + Principal: User{AWS: set.CreateStringSet("*")}, + Resources: bucketResource, + Sid: "", + } + statements = append(statements, statement) + + if policy == BucketPolicyReadOnly || policy == BucketPolicyReadWrite { + statement = Statement{ + Actions: readOnlyBucketActions, + Effect: "Allow", + Principal: User{AWS: set.CreateStringSet("*")}, + Resources: bucketResource, + Sid: "", + } + if prefix != "" { + condKeyMap := make(ConditionKeyMap) + condKeyMap.Add("s3:prefix", set.CreateStringSet(prefix)) + condMap := make(ConditionMap) + condMap.Add("StringEquals", condKeyMap) + statement.Conditions = condMap + } + statements = append(statements, statement) + } + + if policy == BucketPolicyWriteOnly || policy == BucketPolicyReadWrite { + statement = Statement{ + Actions: writeOnlyBucketActions, + Effect: "Allow", + Principal: User{AWS: set.CreateStringSet("*")}, + Resources: bucketResource, + Sid: "", + } + statements = append(statements, statement) + } + + return statements +} + +// Returns new statements contains object actions for given policy. 
+func newObjectStatement(policy BucketPolicy, bucketName string, prefix string) (statements []Statement) { + statements = []Statement{} + if policy == BucketPolicyNone || bucketName == "" { + return statements + } + + statement := Statement{ + Effect: "Allow", + Principal: User{AWS: set.CreateStringSet("*")}, + Resources: set.CreateStringSet(awsResourcePrefix + bucketName + "/" + prefix + "*"), + Sid: "", + } + + if policy == BucketPolicyReadOnly { + statement.Actions = readOnlyObjectActions + } else if policy == BucketPolicyWriteOnly { + statement.Actions = writeOnlyObjectActions + } else if policy == BucketPolicyReadWrite { + statement.Actions = readWriteObjectActions + } + + statements = append(statements, statement) + return statements +} + +// Returns new statements for given policy, bucket and prefix. +func newStatements(policy BucketPolicy, bucketName string, prefix string) (statements []Statement) { + statements = []Statement{} + ns := newBucketStatement(policy, bucketName, prefix) + statements = append(statements, ns...) + + ns = newObjectStatement(policy, bucketName, prefix) + statements = append(statements, ns...) + + return statements +} + +// Returns whether given bucket statements are used by other than given prefix statements. 
+func getInUsePolicy(statements []Statement, bucketName string, prefix string) (readOnlyInUse, writeOnlyInUse bool) { + resourcePrefix := awsResourcePrefix + bucketName + "/" + objectResource := awsResourcePrefix + bucketName + "/" + prefix + "*" + + for _, s := range statements { + if !s.Resources.Contains(objectResource) && !s.Resources.FuncMatch(startsWithFunc, resourcePrefix).IsEmpty() { + if s.Actions.Intersection(readOnlyObjectActions).Equals(readOnlyObjectActions) { + readOnlyInUse = true + } + + if s.Actions.Intersection(writeOnlyObjectActions).Equals(writeOnlyObjectActions) { + writeOnlyInUse = true + } + } + if readOnlyInUse && writeOnlyInUse { + break + } + } + + return readOnlyInUse, writeOnlyInUse +} + +// Removes object actions in given statement. +func removeObjectActions(statement Statement, objectResource string) Statement { + if statement.Conditions == nil { + if len(statement.Resources) > 1 { + statement.Resources.Remove(objectResource) + } else { + statement.Actions = statement.Actions.Difference(readOnlyObjectActions) + statement.Actions = statement.Actions.Difference(writeOnlyObjectActions) + } + } + + return statement +} + +// Removes bucket actions for given policy in given statement. 
+func removeBucketActions(statement Statement, prefix string, bucketResource string, readOnlyInUse, writeOnlyInUse bool) Statement { + removeReadOnly := func() { + if !statement.Actions.Intersection(readOnlyBucketActions).Equals(readOnlyBucketActions) { + return + } + + if statement.Conditions == nil { + statement.Actions = statement.Actions.Difference(readOnlyBucketActions) + return + } + + if prefix != "" { + stringEqualsValue := statement.Conditions["StringEquals"] + values := set.NewStringSet() + if stringEqualsValue != nil { + values = stringEqualsValue["s3:prefix"] + if values == nil { + values = set.NewStringSet() + } + } + + values.Remove(prefix) + + if stringEqualsValue != nil { + if values.IsEmpty() { + delete(stringEqualsValue, "s3:prefix") + } + if len(stringEqualsValue) == 0 { + delete(statement.Conditions, "StringEquals") + } + } + + if len(statement.Conditions) == 0 { + statement.Conditions = nil + statement.Actions = statement.Actions.Difference(readOnlyBucketActions) + } + } + } + + removeWriteOnly := func() { + if statement.Conditions == nil { + statement.Actions = statement.Actions.Difference(writeOnlyBucketActions) + } + } + + if len(statement.Resources) > 1 { + statement.Resources.Remove(bucketResource) + } else { + if !readOnlyInUse { + removeReadOnly() + } + + if !writeOnlyInUse { + removeWriteOnly() + } + } + + return statement +} + +// Returns statements containing removed actions/statements for given +// policy, bucket name and prefix. 
+func removeStatements(statements []Statement, bucketName string, prefix string) []Statement { + bucketResource := awsResourcePrefix + bucketName + objectResource := awsResourcePrefix + bucketName + "/" + prefix + "*" + readOnlyInUse, writeOnlyInUse := getInUsePolicy(statements, bucketName, prefix) + + out := []Statement{} + readOnlyBucketStatements := []Statement{} + s3PrefixValues := set.NewStringSet() + + for _, statement := range statements { + if !isValidStatement(statement, bucketName) { + out = append(out, statement) + continue + } + + if statement.Resources.Contains(bucketResource) { + if statement.Conditions != nil { + statement = removeBucketActions(statement, prefix, bucketResource, false, false) + } else { + statement = removeBucketActions(statement, prefix, bucketResource, readOnlyInUse, writeOnlyInUse) + } + } else if statement.Resources.Contains(objectResource) { + statement = removeObjectActions(statement, objectResource) + } + + if !statement.Actions.IsEmpty() { + if statement.Resources.Contains(bucketResource) && + statement.Actions.Intersection(readOnlyBucketActions).Equals(readOnlyBucketActions) && + statement.Effect == "Allow" && + statement.Principal.AWS.Contains("*") { + + if statement.Conditions != nil { + stringEqualsValue := statement.Conditions["StringEquals"] + values := set.NewStringSet() + if stringEqualsValue != nil { + values = stringEqualsValue["s3:prefix"] + if values == nil { + values = set.NewStringSet() + } + } + s3PrefixValues = s3PrefixValues.Union(values.ApplyFunc(func(v string) string { + return bucketResource + "/" + v + "*" + })) + } else if !s3PrefixValues.IsEmpty() { + readOnlyBucketStatements = append(readOnlyBucketStatements, statement) + continue + } + } + out = append(out, statement) + } + } + + skipBucketStatement := true + resourcePrefix := awsResourcePrefix + bucketName + "/" + for _, statement := range out { + if !statement.Resources.FuncMatch(startsWithFunc, resourcePrefix).IsEmpty() && + 
s3PrefixValues.Intersection(statement.Resources).IsEmpty() { + skipBucketStatement = false + break + } + } + + for _, statement := range readOnlyBucketStatements { + if skipBucketStatement && + statement.Resources.Contains(bucketResource) && + statement.Effect == "Allow" && + statement.Principal.AWS.Contains("*") && + statement.Conditions == nil { + continue + } + + out = append(out, statement) + } + + if len(out) == 1 { + statement := out[0] + if statement.Resources.Contains(bucketResource) && + statement.Actions.Intersection(commonBucketActions).Equals(commonBucketActions) && + statement.Effect == "Allow" && + statement.Principal.AWS.Contains("*") && + statement.Conditions == nil { + out = []Statement{} + } + } + + return out +} + +// Appends given statement into statement list to have unique statements. +// - If statement already exists in statement list, it ignores. +// - If statement exists with different conditions, they are merged. +// - Else the statement is appended to statement list. 
+func appendStatement(statements []Statement, statement Statement) []Statement { + for i, s := range statements { + if s.Actions.Equals(statement.Actions) && + s.Effect == statement.Effect && + s.Principal.AWS.Equals(statement.Principal.AWS) && + reflect.DeepEqual(s.Conditions, statement.Conditions) { + statements[i].Resources = s.Resources.Union(statement.Resources) + return statements + } else if s.Resources.Equals(statement.Resources) && + s.Effect == statement.Effect && + s.Principal.AWS.Equals(statement.Principal.AWS) && + reflect.DeepEqual(s.Conditions, statement.Conditions) { + statements[i].Actions = s.Actions.Union(statement.Actions) + return statements + } + + if s.Resources.Intersection(statement.Resources).Equals(statement.Resources) && + s.Actions.Intersection(statement.Actions).Equals(statement.Actions) && + s.Effect == statement.Effect && + s.Principal.AWS.Intersection(statement.Principal.AWS).Equals(statement.Principal.AWS) { + if reflect.DeepEqual(s.Conditions, statement.Conditions) { + return statements + } + if s.Conditions != nil && statement.Conditions != nil { + if s.Resources.Equals(statement.Resources) { + statements[i].Conditions = mergeConditionMap(s.Conditions, statement.Conditions) + return statements + } + } + } + } + + if !(statement.Actions.IsEmpty() && statement.Resources.IsEmpty()) { + return append(statements, statement) + } + + return statements +} + +// Appends two statement lists. +func appendStatements(statements []Statement, appendStatements []Statement) []Statement { + for _, s := range appendStatements { + statements = appendStatement(statements, s) + } + + return statements +} + +// Returns policy of given bucket statement. 
+func getBucketPolicy(statement Statement, prefix string) (commonFound, readOnly, writeOnly bool) { + if !(statement.Effect == "Allow" && statement.Principal.AWS.Contains("*")) { + return commonFound, readOnly, writeOnly + } + + if statement.Actions.Intersection(commonBucketActions).Equals(commonBucketActions) && + statement.Conditions == nil { + commonFound = true + } + + if statement.Actions.Intersection(writeOnlyBucketActions).Equals(writeOnlyBucketActions) && + statement.Conditions == nil { + writeOnly = true + } + + if statement.Actions.Intersection(readOnlyBucketActions).Equals(readOnlyBucketActions) { + if prefix != "" && statement.Conditions != nil { + if stringEqualsValue, ok := statement.Conditions["StringEquals"]; ok { + if s3PrefixValues, ok := stringEqualsValue["s3:prefix"]; ok { + if s3PrefixValues.Contains(prefix) { + readOnly = true + } + } + } else if stringNotEqualsValue, ok := statement.Conditions["StringNotEquals"]; ok { + if s3PrefixValues, ok := stringNotEqualsValue["s3:prefix"]; ok { + if !s3PrefixValues.Contains(prefix) { + readOnly = true + } + } + } + } else if prefix == "" && statement.Conditions == nil { + readOnly = true + } else if prefix != "" && statement.Conditions == nil { + readOnly = true + } + } + + return commonFound, readOnly, writeOnly +} + +// Returns policy of given object statement. +func getObjectPolicy(statement Statement) (readOnly bool, writeOnly bool) { + if statement.Effect == "Allow" && + statement.Principal.AWS.Contains("*") && + statement.Conditions == nil { + if statement.Actions.Intersection(readOnlyObjectActions).Equals(readOnlyObjectActions) { + readOnly = true + } + if statement.Actions.Intersection(writeOnlyObjectActions).Equals(writeOnlyObjectActions) { + writeOnly = true + } + } + + return readOnly, writeOnly +} + +// GetPolicy - Returns policy of given bucket name, prefix in given statements. 
+func GetPolicy(statements []Statement, bucketName string, prefix string) BucketPolicy { + bucketResource := awsResourcePrefix + bucketName + objectResource := awsResourcePrefix + bucketName + "/" + prefix + "*" + + bucketCommonFound := false + bucketReadOnly := false + bucketWriteOnly := false + matchedResource := "" + objReadOnly := false + objWriteOnly := false + + for _, s := range statements { + matchedObjResources := set.NewStringSet() + if s.Resources.Contains(objectResource) { + matchedObjResources.Add(objectResource) + } else { + matchedObjResources = s.Resources.FuncMatch(resourceMatch, objectResource) + } + + if !matchedObjResources.IsEmpty() { + readOnly, writeOnly := getObjectPolicy(s) + for resource := range matchedObjResources { + if len(matchedResource) < len(resource) { + objReadOnly = readOnly + objWriteOnly = writeOnly + matchedResource = resource + } else if len(matchedResource) == len(resource) { + objReadOnly = objReadOnly || readOnly + objWriteOnly = objWriteOnly || writeOnly + matchedResource = resource + } + } + } else if s.Resources.Contains(bucketResource) { + commonFound, readOnly, writeOnly := getBucketPolicy(s, prefix) + bucketCommonFound = bucketCommonFound || commonFound + bucketReadOnly = bucketReadOnly || readOnly + bucketWriteOnly = bucketWriteOnly || writeOnly + } + } + + policy := BucketPolicyNone + if bucketCommonFound { + if bucketReadOnly && bucketWriteOnly && objReadOnly && objWriteOnly { + policy = BucketPolicyReadWrite + } else if bucketReadOnly && objReadOnly { + policy = BucketPolicyReadOnly + } else if bucketWriteOnly && objWriteOnly { + policy = BucketPolicyWriteOnly + } + } + + return policy +} + +// GetPolicies - returns a map of policies rules of given bucket name, prefix in given statements. 
+func GetPolicies(statements []Statement, bucketName string) map[string]BucketPolicy { + policyRules := map[string]BucketPolicy{} + objResources := set.NewStringSet() + // Search all resources related to objects policy + for _, s := range statements { + for r := range s.Resources { + if strings.HasPrefix(r, awsResourcePrefix+bucketName+"/") { + objResources.Add(r) + } + } + } + // Pretend that policy resource as an actual object and fetch its policy + for r := range objResources { + // Put trailing * if exists in asterisk + asterisk := "" + if strings.HasSuffix(r, "*") { + r = r[:len(r)-1] + asterisk = "*" + } + objectPath := r[len(awsResourcePrefix+bucketName)+1:] + p := GetPolicy(statements, bucketName, objectPath) + policyRules[bucketName+"/"+objectPath+asterisk] = p + } + return policyRules +} + +// SetPolicy - Returns new statements containing policy of given bucket name and prefix are appended. +func SetPolicy(statements []Statement, policy BucketPolicy, bucketName string, prefix string) []Statement { + out := removeStatements(statements, bucketName, prefix) + // fmt.Println("out = ") + // printstatement(out) + ns := newStatements(policy, bucketName, prefix) + // fmt.Println("ns = ") + // printstatement(ns) + + rv := appendStatements(out, ns) + // fmt.Println("rv = ") + // printstatement(rv) + + return rv +} + +// Match function matches wild cards in 'pattern' for resource. 
+func resourceMatch(pattern, resource string) bool { + if pattern == "" { + return resource == pattern + } + if pattern == "*" { + return true + } + parts := strings.Split(pattern, "*") + if len(parts) == 1 { + return resource == pattern + } + tGlob := strings.HasSuffix(pattern, "*") + end := len(parts) - 1 + if !strings.HasPrefix(resource, parts[0]) { + return false + } + for i := 1; i < end; i++ { + if !strings.Contains(resource, parts[i]) { + return false + } + idx := strings.Index(resource, parts[i]) + len(parts[i]) + resource = resource[idx:] + } + return tGlob || strings.HasSuffix(resource, parts[end]) +} diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go new file mode 100644 index 000000000..755fd1ac5 --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-streaming.go @@ -0,0 +1,285 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package s3signer + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "time" +) + +// Reference for constants used below - +// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming.html#example-signature-calculations-streaming +const ( + streamingSignAlgorithm = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" + streamingEncoding = "aws-chunked" + streamingPayloadHdr = "AWS4-HMAC-SHA256-PAYLOAD" + emptySHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + payloadChunkSize = 64 * 1024 + chunkSigConstLen = 17 // ";chunk-signature=" + signatureStrLen = 64 // e.g. "f2ca1bb6c7e907d06dafe4687e579fce76b37e4e93b7605022da52e6ccc26fd2" + crlfLen = 2 // CRLF +) + +// Request headers to be ignored while calculating seed signature for +// a request. +var ignoredStreamingHeaders = map[string]bool{ + "Authorization": true, + "User-Agent": true, + "Content-Type": true, +} + +// getSignedChunkLength - calculates the length of chunk metadata +func getSignedChunkLength(chunkDataSize int64) int64 { + return int64(len(fmt.Sprintf("%x", chunkDataSize))) + + chunkSigConstLen + + signatureStrLen + + crlfLen + + chunkDataSize + + crlfLen +} + +// getStreamLength - calculates the length of the overall stream (data + metadata) +func getStreamLength(dataLen, chunkSize int64) int64 { + if dataLen <= 0 { + return 0 + } + + chunksCount := int64(dataLen / chunkSize) + remainingBytes := int64(dataLen % chunkSize) + streamLen := int64(0) + streamLen += chunksCount * getSignedChunkLength(chunkSize) + if remainingBytes > 0 { + streamLen += getSignedChunkLength(remainingBytes) + } + streamLen += getSignedChunkLength(0) + return streamLen +} + +// buildChunkStringToSign - returns the string to sign given chunk data +// and previous signature. 
+func buildChunkStringToSign(t time.Time, region, previousSig string, chunkData []byte) string { + stringToSignParts := []string{ + streamingPayloadHdr, + t.Format(iso8601DateFormat), + getScope(region, t), + previousSig, + emptySHA256, + hex.EncodeToString(sum256(chunkData)), + } + + return strings.Join(stringToSignParts, "\n") +} + +// prepareStreamingRequest - prepares a request with appropriate +// headers before computing the seed signature. +func prepareStreamingRequest(req *http.Request, dataLen int64, timestamp time.Time) { + // Set x-amz-content-sha256 header. + req.Header.Set("X-Amz-Content-Sha256", streamingSignAlgorithm) + req.Header.Set("Content-Encoding", streamingEncoding) + req.Header.Set("X-Amz-Date", timestamp.Format(iso8601DateFormat)) + + // Set content length with streaming signature for each chunk included. + req.ContentLength = getStreamLength(dataLen, int64(payloadChunkSize)) + req.Header.Set("x-amz-decoded-content-length", strconv.FormatInt(dataLen, 10)) +} + +// buildChunkHeader - returns the chunk header. +// e.g string(IntHexBase(chunk-size)) + ";chunk-signature=" + signature + \r\n + chunk-data + \r\n +func buildChunkHeader(chunkLen int64, signature string) []byte { + return []byte(strconv.FormatInt(chunkLen, 16) + ";chunk-signature=" + signature + "\r\n") +} + +// buildChunkSignature - returns chunk signature for a given chunk and previous signature. +func buildChunkSignature(chunkData []byte, reqTime time.Time, region, + previousSignature, secretAccessKey string) string { + + chunkStringToSign := buildChunkStringToSign(reqTime, region, + previousSignature, chunkData) + signingKey := getSigningKey(secretAccessKey, region, reqTime) + return getSignature(signingKey, chunkStringToSign) +} + +// getSeedSignature - returns the seed signature for a given request. 
+func (s *StreamingReader) setSeedSignature(req *http.Request) { + // Get canonical request + canonicalRequest := getCanonicalRequest(*req, ignoredStreamingHeaders) + + // Get string to sign from canonical request. + stringToSign := getStringToSignV4(s.reqTime, s.region, canonicalRequest) + + signingKey := getSigningKey(s.secretAccessKey, s.region, s.reqTime) + + // Calculate signature. + s.seedSignature = getSignature(signingKey, stringToSign) +} + +// StreamingReader implements chunked upload signature as a reader on +// top of req.Body's ReaderCloser chunk header;data;... repeat +type StreamingReader struct { + accessKeyID string + secretAccessKey string + region string + prevSignature string + seedSignature string + contentLen int64 // Content-Length from req header + baseReadCloser io.ReadCloser // underlying io.Reader + bytesRead int64 // bytes read from underlying io.Reader + buf bytes.Buffer // holds signed chunk + chunkBuf []byte // holds raw data read from req Body + chunkBufLen int // no. of bytes read so far into chunkBuf + done bool // done reading the underlying reader to EOF + reqTime time.Time + chunkNum int + totalChunks int + lastChunkSize int +} + +// signChunk - signs a chunk read from s.baseReader of chunkLen size. +func (s *StreamingReader) signChunk(chunkLen int) { + // Compute chunk signature for next header + signature := buildChunkSignature(s.chunkBuf[:chunkLen], s.reqTime, + s.region, s.prevSignature, s.secretAccessKey) + + // For next chunk signature computation + s.prevSignature = signature + + // Write chunk header into streaming buffer + chunkHdr := buildChunkHeader(int64(chunkLen), signature) + s.buf.Write(chunkHdr) + + // Write chunk data into streaming buffer + s.buf.Write(s.chunkBuf[:chunkLen]) + + // Write the chunk trailer. + s.buf.Write([]byte("\r\n")) + + // Reset chunkBufLen for next chunk read. 
+ s.chunkBufLen = 0 + s.chunkNum++ +} + +// setStreamingAuthHeader - builds and sets authorization header value +// for streaming signature. +func (s *StreamingReader) setStreamingAuthHeader(req *http.Request) { + credential := GetCredential(s.accessKeyID, s.region, s.reqTime) + authParts := []string{ + signV4Algorithm + " Credential=" + credential, + "SignedHeaders=" + getSignedHeaders(*req, ignoredStreamingHeaders), + "Signature=" + s.seedSignature, + } + + // Set authorization header. + auth := strings.Join(authParts, ",") + req.Header.Set("Authorization", auth) +} + +// StreamingSignV4 - provides chunked upload signatureV4 support by +// implementing io.Reader. +func StreamingSignV4(req *http.Request, accessKeyID, secretAccessKey, + region string, dataLen int64, reqTime time.Time) *http.Request { + + // Set headers needed for streaming signature. + prepareStreamingRequest(req, dataLen, reqTime) + + stReader := &StreamingReader{ + baseReadCloser: req.Body, + accessKeyID: accessKeyID, + secretAccessKey: secretAccessKey, + region: region, + reqTime: reqTime, + chunkBuf: make([]byte, payloadChunkSize), + contentLen: dataLen, + chunkNum: 1, + totalChunks: int((dataLen+payloadChunkSize-1)/payloadChunkSize) + 1, + lastChunkSize: int(dataLen % payloadChunkSize), + } + + // Add the request headers required for chunk upload signing. + + // Compute the seed signature. + stReader.setSeedSignature(req) + + // Set the authorization header with the seed signature. + stReader.setStreamingAuthHeader(req) + + // Set seed signature as prevSignature for subsequent + // streaming signing process. + stReader.prevSignature = stReader.seedSignature + req.Body = stReader + + return req +} + +// Read - this method performs chunk upload signature providing a +// io.Reader interface. +func (s *StreamingReader) Read(buf []byte) (int, error) { + switch { + // After the last chunk is read from underlying reader, we + // never re-fill s.buf. 
+ case s.done: + + // s.buf will be (re-)filled with next chunk when has lesser + // bytes than asked for. + case s.buf.Len() < len(buf): + s.chunkBufLen = 0 + for { + n1, err := s.baseReadCloser.Read(s.chunkBuf[s.chunkBufLen:]) + if err == nil || err == io.ErrUnexpectedEOF { + s.chunkBufLen += n1 + s.bytesRead += int64(n1) + + if s.chunkBufLen == payloadChunkSize || + (s.chunkNum == s.totalChunks-1 && + s.chunkBufLen == s.lastChunkSize) { + // Sign the chunk and write it to s.buf. + s.signChunk(s.chunkBufLen) + break + } + + } else if err == io.EOF { + // No more data left in baseReader - last chunk. + // Done reading the last chunk from baseReader. + s.done = true + + // bytes read from baseReader different than + // content length provided. + if s.bytesRead != s.contentLen { + return 0, io.ErrUnexpectedEOF + } + + // Sign the chunk and write it to s.buf. + s.signChunk(0) + break + + } else { + return 0, err + } + } + } + return s.buf.Read(buf) +} + +// Close - this method makes underlying io.ReadCloser's Close method available. +func (s *StreamingReader) Close() error { + return s.baseReadCloser.Close() +} diff --git a/vendor/github.com/minio/minio-go/request-signature-v2.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go similarity index 69% rename from vendor/github.com/minio/minio-go/request-signature-v2.go rename to vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go index 38224557e..af0e91515 100644 --- a/vendor/github.com/minio/minio-go/request-signature-v2.go +++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v2.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package minio +package s3signer import ( "bytes" @@ -29,6 +29,8 @@ import ( "strconv" "strings" "time" + + "github.com/minio/minio-go/pkg/s3utils" ) // Signature and API related constants. 
@@ -45,45 +47,38 @@ func encodeURL2Path(u *url.URL) (path string) { bucketName := hostSplits[0] path = "/" + bucketName path += u.Path - path = urlEncodePath(path) + path = s3utils.EncodePath(path) return } if strings.HasSuffix(u.Host, ".storage.googleapis.com") { path = "/" + strings.TrimSuffix(u.Host, ".storage.googleapis.com") path += u.Path - path = urlEncodePath(path) + path = s3utils.EncodePath(path) return } - path = urlEncodePath(u.Path) + path = s3utils.EncodePath(u.Path) return } -// preSignV2 - presign the request in following style. +// PreSignV2 - presign the request in following style. // https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}. -func preSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64) *http.Request { +func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64) *http.Request { // Presign is not needed for anonymous credentials. if accessKeyID == "" || secretAccessKey == "" { return &req } - d := time.Now().UTC() - // Add date if not present. - if date := req.Header.Get("Date"); date == "" { - req.Header.Set("Date", d.Format(http.TimeFormat)) - } - - // Get encoded URL path. - path := encodeURL2Path(req.URL) - if len(req.URL.Query()) > 0 { - // Keep the usual queries unescaped for string to sign. - query, _ := url.QueryUnescape(queryEncode(req.URL.Query())) - path = path + "?" + query - } + d := time.Now().UTC() // Find epoch expires when the request will expire. epochExpires := d.Unix() + expires - // Get string to sign. - stringToSign := fmt.Sprintf("%s\n\n\n%d\n%s", req.Method, epochExpires, path) + // Add expires header if not present. + if expiresStr := req.Header.Get("Expires"); expiresStr == "" { + req.Header.Set("Expires", strconv.FormatInt(epochExpires, 10)) + } + + // Get presigned string to sign. 
+ stringToSign := preStringifyHTTPReq(req) hm := hmac.New(sha1.New, []byte(secretAccessKey)) hm.Write([]byte(stringToSign)) @@ -102,18 +97,18 @@ func preSignV2(req http.Request, accessKeyID, secretAccessKey string, expires in query.Set("Expires", strconv.FormatInt(epochExpires, 10)) // Encode query and save. - req.URL.RawQuery = queryEncode(query) + req.URL.RawQuery = s3utils.QueryEncode(query) // Save signature finally. - req.URL.RawQuery += "&Signature=" + urlEncodePath(signature) + req.URL.RawQuery += "&Signature=" + s3utils.EncodePath(signature) // Return. return &req } -// postPresignSignatureV2 - presigned signature for PostPolicy +// PostPresignSignatureV2 - presigned signature for PostPolicy // request. -func postPresignSignatureV2(policyBase64, secretAccessKey string) string { +func PostPresignSignatureV2(policyBase64, secretAccessKey string) string { hm := hmac.New(sha1.New, []byte(secretAccessKey)) hm.Write([]byte(policyBase64)) signature := base64.StdEncoding.EncodeToString(hm.Sum(nil)) @@ -124,7 +119,7 @@ func postPresignSignatureV2(policyBase64, secretAccessKey string) string { // Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) ); // // StringToSign = HTTP-Verb + "\n" + -// Content-MD5 + "\n" + +// Content-Md5 + "\n" + // Content-Type + "\n" + // Date + "\n" + // CanonicalizedProtocolHeaders + @@ -136,8 +131,8 @@ func postPresignSignatureV2(policyBase64, secretAccessKey string) string { // // CanonicalizedProtocolHeaders = -// signV2 sign the request before Do() (AWS Signature Version 2). -func signV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request { +// SignV2 sign the request before Do() (AWS Signature Version 2). +func SignV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request { // Signature calculation is not needed for anonymous credentials. 
if accessKeyID == "" || secretAccessKey == "" { return &req @@ -152,7 +147,7 @@ func signV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request } // Calculate HMAC for secretAccessKey. - stringToSign := getStringToSignV2(req) + stringToSign := stringifyHTTPReq(req) hm := hmac.New(sha1.New, []byte(secretAccessKey)) hm.Write([]byte(stringToSign)) @@ -172,32 +167,57 @@ func signV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request // From the Amazon docs: // // StringToSign = HTTP-Verb + "\n" + -// Content-MD5 + "\n" + +// Content-Md5 + "\n" + +// Content-Type + "\n" + +// Expires + "\n" + +// CanonicalizedProtocolHeaders + +// CanonicalizedResource; +func preStringifyHTTPReq(req http.Request) string { + buf := new(bytes.Buffer) + // Write standard headers. + writePreSignV2Headers(buf, req) + // Write canonicalized protocol headers if any. + writeCanonicalizedHeaders(buf, req) + // Write canonicalized Query resources if any. + isPreSign := true + writeCanonicalizedResource(buf, req, isPreSign) + return buf.String() +} + +// writePreSignV2Headers - write preSign v2 required headers. +func writePreSignV2Headers(buf *bytes.Buffer, req http.Request) { + buf.WriteString(req.Method + "\n") + buf.WriteString(req.Header.Get("Content-Md5") + "\n") + buf.WriteString(req.Header.Get("Content-Type") + "\n") + buf.WriteString(req.Header.Get("Expires") + "\n") +} + +// From the Amazon docs: +// +// StringToSign = HTTP-Verb + "\n" + +// Content-Md5 + "\n" + // Content-Type + "\n" + // Date + "\n" + // CanonicalizedProtocolHeaders + // CanonicalizedResource; -func getStringToSignV2(req http.Request) string { +func stringifyHTTPReq(req http.Request) string { buf := new(bytes.Buffer) // Write standard headers. - writeDefaultHeaders(buf, req) + writeSignV2Headers(buf, req) // Write canonicalized protocol headers if any. writeCanonicalizedHeaders(buf, req) // Write canonicalized Query resources if any. 
- writeCanonicalizedResource(buf, req) + isPreSign := false + writeCanonicalizedResource(buf, req, isPreSign) return buf.String() } -// writeDefaultHeader - write all default necessary headers -func writeDefaultHeaders(buf *bytes.Buffer, req http.Request) { - buf.WriteString(req.Method) - buf.WriteByte('\n') - buf.WriteString(req.Header.Get("Content-MD5")) - buf.WriteByte('\n') - buf.WriteString(req.Header.Get("Content-Type")) - buf.WriteByte('\n') - buf.WriteString(req.Header.Get("Date")) - buf.WriteByte('\n') +// writeSignV2Headers - write signV2 required headers. +func writeSignV2Headers(buf *bytes.Buffer, req http.Request) { + buf.WriteString(req.Method + "\n") + buf.WriteString(req.Header.Get("Content-Md5") + "\n") + buf.WriteString(req.Header.Get("Content-Type") + "\n") + buf.WriteString(req.Header.Get("Date") + "\n") } // writeCanonicalizedHeaders - write canonicalized headers. @@ -235,20 +255,16 @@ func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) { } } -// Must be sorted: +// The following list is already sorted and should always be, otherwise we could +// have signature-related issues var resourceList = []string{ "acl", + "delete", "location", "logging", "notification", "partNumber", "policy", - "response-content-type", - "response-content-language", - "response-expires", - "response-cache-control", - "response-content-disposition", - "response-content-encoding", "requestPayment", "torrent", "uploadId", @@ -264,14 +280,22 @@ var resourceList = []string{ // CanonicalizedResource = [ "/" + Bucket ] + // + // [ sub-resource, if present. For example "?acl", "?location", "?logging", or "?torrent"]; -func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request) { +func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, isPreSign bool) { // Save request URL. requestURL := req.URL // Get encoded URL path. path := encodeURL2Path(requestURL) + if isPreSign { + // Get encoded URL path. 
+ if len(requestURL.Query()) > 0 { + // Keep the usual queries unescaped for string to sign. + query, _ := url.QueryUnescape(s3utils.QueryEncode(requestURL.Query())) + path = path + "?" + query + } + buf.WriteString(path) + return + } buf.WriteString(path) - - sort.Strings(resourceList) if requestURL.RawQuery != "" { var n int vals, _ := url.ParseQuery(requestURL.RawQuery) @@ -292,7 +316,7 @@ func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request) { // Request parameters if len(vv[0]) > 0 { buf.WriteByte('=') - buf.WriteString(strings.Replace(url.QueryEscape(vv[0]), "+", "%20", -1)) + buf.WriteString(vv[0]) } } } diff --git a/vendor/github.com/minio/minio-go/request-signature-v4.go b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go similarity index 85% rename from vendor/github.com/minio/minio-go/request-signature-v4.go rename to vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go index dfd11e9e4..245fb08c3 100644 --- a/vendor/github.com/minio/minio-go/request-signature-v4.go +++ b/vendor/github.com/minio/minio-go/pkg/s3signer/request-signature-v4.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package minio +package s3signer import ( "bytes" @@ -24,6 +24,8 @@ import ( "strconv" "strings" "time" + + "github.com/minio/minio-go/pkg/s3utils" ) // Signature and API related constants. @@ -68,7 +70,7 @@ const ( /// /// Is skipped for obvious reasons /// -var ignoredHeaders = map[string]bool{ +var v4IgnoredHeaders = map[string]bool{ "Authorization": true, "Content-Type": true, "Content-Length": true, @@ -101,8 +103,8 @@ func getScope(location string, t time.Time) string { return scope } -// getCredential generate a credential string. -func getCredential(accessKeyID, location string, t time.Time) string { +// GetCredential generate a credential string. 
+func GetCredential(accessKeyID, location string, t time.Time) string { scope := getScope(location, t) return accessKeyID + "/" + scope } @@ -113,14 +115,14 @@ func getHashedPayload(req http.Request) string { hashedPayload := req.Header.Get("X-Amz-Content-Sha256") if hashedPayload == "" { // Presign does not have a payload, use S3 recommended value. - hashedPayload = "UNSIGNED-PAYLOAD" + hashedPayload = unsignedPayload } return hashedPayload } // getCanonicalHeaders generate a list of request headers for // signature. -func getCanonicalHeaders(req http.Request) string { +func getCanonicalHeaders(req http.Request, ignoredHeaders map[string]bool) string { var headers []string vals := make(map[string][]string) for k, vv := range req.Header { @@ -159,7 +161,7 @@ func getCanonicalHeaders(req http.Request) string { // getSignedHeaders generate all signed request headers. // i.e lexically sorted, semicolon-separated list of lowercase // request header names. -func getSignedHeaders(req http.Request) string { +func getSignedHeaders(req http.Request, ignoredHeaders map[string]bool) string { var headers []string for k := range req.Header { if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { @@ -181,14 +183,14 @@ func getSignedHeaders(req http.Request) string { // \n // \n // -func getCanonicalRequest(req http.Request) string { +func getCanonicalRequest(req http.Request, ignoredHeaders map[string]bool) string { req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1) canonicalRequest := strings.Join([]string{ req.Method, - urlEncodePath(req.URL.Path), + s3utils.EncodePath(req.URL.Path), req.URL.RawQuery, - getCanonicalHeaders(req), - getSignedHeaders(req), + getCanonicalHeaders(req, ignoredHeaders), + getSignedHeaders(req, ignoredHeaders), getHashedPayload(req), }, "\n") return canonicalRequest @@ -202,9 +204,9 @@ func getStringToSignV4(t time.Time, location, canonicalRequest string) string { return stringToSign } -// preSignV4 presign the request, 
in accordance with +// PreSignV4 presign the request, in accordance with // http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html. -func preSignV4(req http.Request, accessKeyID, secretAccessKey, location string, expires int64) *http.Request { +func PreSignV4(req http.Request, accessKeyID, secretAccessKey, location string, expires int64) *http.Request { // Presign is not needed for anonymous credentials. if accessKeyID == "" || secretAccessKey == "" { return &req @@ -214,10 +216,10 @@ func preSignV4(req http.Request, accessKeyID, secretAccessKey, location string, t := time.Now().UTC() // Get credential string. - credential := getCredential(accessKeyID, location, t) + credential := GetCredential(accessKeyID, location, t) // Get all signed headers. - signedHeaders := getSignedHeaders(req) + signedHeaders := getSignedHeaders(req, v4IgnoredHeaders) // Set URL query. query := req.URL.Query() @@ -229,7 +231,7 @@ func preSignV4(req http.Request, accessKeyID, secretAccessKey, location string, req.URL.RawQuery = query.Encode() // Get canonical request. - canonicalRequest := getCanonicalRequest(req) + canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders) // Get string to sign from canonical request. stringToSign := getStringToSignV4(t, location, canonicalRequest) @@ -246,9 +248,9 @@ func preSignV4(req http.Request, accessKeyID, secretAccessKey, location string, return &req } -// postPresignSignatureV4 - presigned signature for PostPolicy +// PostPresignSignatureV4 - presigned signature for PostPolicy // requests. -func postPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string { +func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string { // Get signining key. signingkey := getSigningKey(secretAccessKey, location, t) // Calculate signature. 
@@ -256,9 +258,9 @@ func postPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, l return signature } -// signV4 sign the request before Do(), in accordance with +// SignV4 sign the request before Do(), in accordance with // http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html. -func signV4(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request { +func SignV4(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request { // Signature calculation is not needed for anonymous credentials. if accessKeyID == "" || secretAccessKey == "" { return &req @@ -271,7 +273,7 @@ func signV4(req http.Request, accessKeyID, secretAccessKey, location string) *ht req.Header.Set("X-Amz-Date", t.Format(iso8601DateFormat)) // Get canonical request. - canonicalRequest := getCanonicalRequest(req) + canonicalRequest := getCanonicalRequest(req, v4IgnoredHeaders) // Get string to sign from canonical request. stringToSign := getStringToSignV4(t, location, canonicalRequest) @@ -280,10 +282,10 @@ func signV4(req http.Request, accessKeyID, secretAccessKey, location string) *ht signingKey := getSigningKey(secretAccessKey, location, t) // Get credential string. - credential := getCredential(accessKeyID, location, t) + credential := GetCredential(accessKeyID, location, t) // Get all signed headers. - signedHeaders := getSignedHeaders(req) + signedHeaders := getSignedHeaders(req, v4IgnoredHeaders) // Calculate signature. signature := getSignature(signingKey, stringToSign) diff --git a/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go b/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go new file mode 100644 index 000000000..0619b3082 --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/s3signer/utils.go @@ -0,0 +1,39 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package s3signer + +import ( + "crypto/hmac" + "crypto/sha256" +) + +// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when +const unsignedPayload = "UNSIGNED-PAYLOAD" + +// sum256 calculate sha256 sum for an input byte array. +func sum256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +// sumHMAC calculate hmac between two input byte array. +func sumHMAC(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} diff --git a/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go new file mode 100644 index 000000000..a3b6ed845 --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/s3utils/utils.go @@ -0,0 +1,183 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package s3utils + +import ( + "bytes" + "encoding/hex" + "net" + "net/url" + "regexp" + "sort" + "strings" + "unicode/utf8" +) + +// Sentinel URL is the default url value which is invalid. +var sentinelURL = url.URL{} + +// IsValidDomain validates if input string is a valid domain name. +func IsValidDomain(host string) bool { + // See RFC 1035, RFC 3696. + host = strings.TrimSpace(host) + if len(host) == 0 || len(host) > 255 { + return false + } + // host cannot start or end with "-" + if host[len(host)-1:] == "-" || host[:1] == "-" { + return false + } + // host cannot start or end with "_" + if host[len(host)-1:] == "_" || host[:1] == "_" { + return false + } + // host cannot start or end with a "." + if host[len(host)-1:] == "." || host[:1] == "." { + return false + } + // All non alphanumeric characters are invalid. + if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:> 0 { + buf.WriteByte('&') + } + buf.WriteString(prefix) + buf.WriteString(percentEncodeSlash(EncodePath(v))) + } + } + return buf.String() +} + +// if object matches reserved string, no need to encode them +var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$") + +// EncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences +// +// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8 +// non english characters cannot be parsed due to the nature in which url.Encode() is written +// +// This function on the other hand is a direct replacement for url.Encode() technique to support +// pretty much every UTF-8 character. 
+func EncodePath(pathName string) string { + if reservedObjectNames.MatchString(pathName) { + return pathName + } + var encodedPathname string + for _, s := range pathName { + if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + } + switch s { + case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark) + encodedPathname = encodedPathname + string(s) + continue + default: + len := utf8.RuneLen(s) + if len < 0 { + // if utf8 cannot convert return the same string as is + return pathName + } + u := make([]byte, len) + utf8.EncodeRune(u, s) + for _, r := range u { + hex := hex.EncodeToString([]byte{r}) + encodedPathname = encodedPathname + "%" + strings.ToUpper(hex) + } + } + } + return encodedPathname +} diff --git a/vendor/github.com/minio/minio-go/pkg/set/stringset.go b/vendor/github.com/minio/minio-go/pkg/set/stringset.go new file mode 100644 index 000000000..9f33488e0 --- /dev/null +++ b/vendor/github.com/minio/minio-go/pkg/set/stringset.go @@ -0,0 +1,196 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package set + +import ( + "encoding/json" + "fmt" + "sort" +) + +// StringSet - uses map as set of strings. +type StringSet map[string]struct{} + +// ToSlice - returns StringSet as string slice. 
+func (set StringSet) ToSlice() []string { + keys := make([]string, 0, len(set)) + for k := range set { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// IsEmpty - returns whether the set is empty or not. +func (set StringSet) IsEmpty() bool { + return len(set) == 0 +} + +// Add - adds string to the set. +func (set StringSet) Add(s string) { + set[s] = struct{}{} +} + +// Remove - removes string in the set. It does nothing if string does not exist in the set. +func (set StringSet) Remove(s string) { + delete(set, s) +} + +// Contains - checks if string is in the set. +func (set StringSet) Contains(s string) bool { + _, ok := set[s] + return ok +} + +// FuncMatch - returns new set containing each value who passes match function. +// A 'matchFn' should accept element in a set as first argument and +// 'matchString' as second argument. The function can do any logic to +// compare both the arguments and should return true to accept element in +// a set to include in output set else the element is ignored. +func (set StringSet) FuncMatch(matchFn func(string, string) bool, matchString string) StringSet { + nset := NewStringSet() + for k := range set { + if matchFn(k, matchString) { + nset.Add(k) + } + } + return nset +} + +// ApplyFunc - returns new set containing each value processed by 'applyFn'. +// A 'applyFn' should accept element in a set as a argument and return +// a processed string. The function can do any logic to return a processed +// string. +func (set StringSet) ApplyFunc(applyFn func(string) string) StringSet { + nset := NewStringSet() + for k := range set { + nset.Add(applyFn(k)) + } + return nset +} + +// Equals - checks whether given set is equal to current set or not. +func (set StringSet) Equals(sset StringSet) bool { + // If length of set is not equal to length of given set, the + // set is not equal to given set. 
+ if len(set) != len(sset) { + return false + } + + // As both sets are equal in length, check each elements are equal. + for k := range set { + if _, ok := sset[k]; !ok { + return false + } + } + + return true +} + +// Intersection - returns the intersection with given set as new set. +func (set StringSet) Intersection(sset StringSet) StringSet { + nset := NewStringSet() + for k := range set { + if _, ok := sset[k]; ok { + nset.Add(k) + } + } + + return nset +} + +// Difference - returns the difference with given set as new set. +func (set StringSet) Difference(sset StringSet) StringSet { + nset := NewStringSet() + for k := range set { + if _, ok := sset[k]; !ok { + nset.Add(k) + } + } + + return nset +} + +// Union - returns the union with given set as new set. +func (set StringSet) Union(sset StringSet) StringSet { + nset := NewStringSet() + for k := range set { + nset.Add(k) + } + + for k := range sset { + nset.Add(k) + } + + return nset +} + +// MarshalJSON - converts to JSON data. +func (set StringSet) MarshalJSON() ([]byte, error) { + return json.Marshal(set.ToSlice()) +} + +// UnmarshalJSON - parses JSON data and creates new set with it. +// If 'data' contains JSON string array, the set contains each string. +// If 'data' contains JSON string, the set contains the string as one element. +// If 'data' contains Other JSON types, JSON parse error is returned. +func (set *StringSet) UnmarshalJSON(data []byte) error { + sl := []string{} + var err error + if err = json.Unmarshal(data, &sl); err == nil { + *set = make(StringSet) + for _, s := range sl { + set.Add(s) + } + } else { + var s string + if err = json.Unmarshal(data, &s); err == nil { + *set = make(StringSet) + set.Add(s) + } + } + + return err +} + +// String - returns printable string of the set. +func (set StringSet) String() string { + return fmt.Sprintf("%s", set.ToSlice()) +} + +// NewStringSet - creates new string set. 
+func NewStringSet() StringSet { + return make(StringSet) +} + +// CreateStringSet - creates new string set with given string values. +func CreateStringSet(sl ...string) StringSet { + set := make(StringSet) + for _, k := range sl { + set.Add(k) + } + return set +} + +// CopyStringSet - returns copy of given set. +func CopyStringSet(set StringSet) StringSet { + nset := NewStringSet() + for k, v := range set { + nset[k] = v + } + return nset +} diff --git a/vendor/github.com/minio/minio-go/post-policy.go b/vendor/github.com/minio/minio-go/post-policy.go index 2a675d770..5e716124a 100644 --- a/vendor/github.com/minio/minio-go/post-policy.go +++ b/vendor/github.com/minio/minio-go/post-policy.go @@ -149,6 +149,24 @@ func (p *PostPolicy) SetContentLengthRange(min, max int64) error { return nil } +// SetSuccessStatusAction - Sets the status success code of the object for this policy +// based upload. +func (p *PostPolicy) SetSuccessStatusAction(status string) error { + if strings.TrimSpace(status) == "" || status == "" { + return ErrInvalidArgument("Status is empty") + } + policyCond := policyCondition{ + matchType: "eq", + condition: "$success_action_status", + value: status, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData["success_action_status"] = status + return nil +} + // addNewPolicy - internal helper to validate adding new policies. func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error { if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" { diff --git a/vendor/github.com/minio/minio-go/request-headers.go b/vendor/github.com/minio/minio-go/request-headers.go new file mode 100644 index 000000000..fa23b2fe3 --- /dev/null +++ b/vendor/github.com/minio/minio-go/request-headers.go @@ -0,0 +1,111 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016-17 Minio, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +import ( + "fmt" + "net/http" + "time" +) + +// RequestHeaders - implement methods for setting special +// request headers for GET, HEAD object operations. +// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html +type RequestHeaders struct { + http.Header +} + +// NewGetReqHeaders - initializes a new request headers for GET request. +func NewGetReqHeaders() RequestHeaders { + return RequestHeaders{ + Header: make(http.Header), + } +} + +// NewHeadReqHeaders - initializes a new request headers for HEAD request. +func NewHeadReqHeaders() RequestHeaders { + return RequestHeaders{ + Header: make(http.Header), + } +} + +// SetMatchETag - set match etag. +func (c RequestHeaders) SetMatchETag(etag string) error { + if etag == "" { + return ErrInvalidArgument("ETag cannot be empty.") + } + c.Set("If-Match", etag) + return nil +} + +// SetMatchETagExcept - set match etag except. +func (c RequestHeaders) SetMatchETagExcept(etag string) error { + if etag == "" { + return ErrInvalidArgument("ETag cannot be empty.") + } + c.Set("If-None-Match", etag) + return nil +} + +// SetUnmodified - set unmodified time since. 
+func (c RequestHeaders) SetUnmodified(modTime time.Time) error { + if modTime.IsZero() { + return ErrInvalidArgument("Modified since cannot be empty.") + } + c.Set("If-Unmodified-Since", modTime.Format(http.TimeFormat)) + return nil +} + +// SetModified - set modified time since. +func (c RequestHeaders) SetModified(modTime time.Time) error { + if modTime.IsZero() { + return ErrInvalidArgument("Modified since cannot be empty.") + } + c.Set("If-Modified-Since", modTime.Format(http.TimeFormat)) + return nil +} + +// SetRange - set the start and end offset of the object to be read. +// See https://tools.ietf.org/html/rfc7233#section-3.1 for reference. +func (c RequestHeaders) SetRange(start, end int64) error { + switch { + case start == 0 && end < 0: + // Read last '-end' bytes. `bytes=-N`. + c.Set("Range", fmt.Sprintf("bytes=%d", end)) + case 0 < start && end == 0: + // Read everything starting from offset + // 'start'. `bytes=N-`. + c.Set("Range", fmt.Sprintf("bytes=%d-", start)) + case 0 <= start && start <= end: + // Read everything starting at 'start' till the + // 'end'. `bytes=N-M` + c.Set("Range", fmt.Sprintf("bytes=%d-%d", start, end)) + default: + // All other cases such as + // bytes=-3- + // bytes=5-3 + // bytes=-2-4 + // bytes=-3-0 + // bytes=-3--2 + // are invalid. + return ErrInvalidArgument( + fmt.Sprintf( + "Invalid range specified: start=%d end=%d", + start, end)) + } + return nil +} diff --git a/vendor/github.com/minio/minio-go/retry-continous.go b/vendor/github.com/minio/minio-go/retry-continous.go new file mode 100644 index 000000000..e300af69c --- /dev/null +++ b/vendor/github.com/minio/minio-go/retry-continous.go @@ -0,0 +1,52 @@ +package minio + +import "time" + +// newRetryTimerContinous creates a timer with exponentially increasing delays forever. 
+func (c Client) newRetryTimerContinous(unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int { + attemptCh := make(chan int) + + // normalize jitter to the range [0, 1.0] + if jitter < NoJitter { + jitter = NoJitter + } + if jitter > MaxJitter { + jitter = MaxJitter + } + + // computes the exponential backoff duration according to + // https://www.awsarchitectureblog.com/2015/03/backoff.html + exponentialBackoffWait := func(attempt int) time.Duration { + // 1< maxAttempt { + attempt = maxAttempt + } + //sleep = random_between(0, min(cap, base * 2 ** attempt)) + sleep := unit * time.Duration(1< cap { + sleep = cap + } + if jitter != NoJitter { + sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter) + } + return sleep + } + + go func() { + defer close(attemptCh) + var nextBackoff int + for { + select { + // Attempts starts. + case attemptCh <- nextBackoff: + nextBackoff++ + case <-doneCh: + // Stop the routine. + return + } + time.Sleep(exponentialBackoffWait(nextBackoff)) + } + }() + return attemptCh +} diff --git a/vendor/github.com/minio/minio-go/retry.go b/vendor/github.com/minio/minio-go/retry.go new file mode 100644 index 000000000..1de5107e4 --- /dev/null +++ b/vendor/github.com/minio/minio-go/retry.go @@ -0,0 +1,152 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package minio + +import ( + "net" + "net/http" + "net/url" + "strings" + "time" +) + +// MaxRetry is the maximum number of retries before stopping. +var MaxRetry = 5 + +// MaxJitter will randomize over the full exponential backoff time +const MaxJitter = 1.0 + +// NoJitter disables the use of jitter for randomizing the exponential backoff time +const NoJitter = 0.0 + +// DefaultRetryUnit - default unit multiplicative per retry. +// defaults to 1 second. +const DefaultRetryUnit = time.Second + +// DefaultRetryCap - Each retry attempt never waits no longer than +// this maximum time duration. +const DefaultRetryCap = time.Second * 30 + +// newRetryTimer creates a timer with exponentially increasing +// delays until the maximum retry attempts are reached. +func (c Client) newRetryTimer(maxRetry int, unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int { + attemptCh := make(chan int) + + // computes the exponential backoff duration according to + // https://www.awsarchitectureblog.com/2015/03/backoff.html + exponentialBackoffWait := func(attempt int) time.Duration { + // normalize jitter to the range [0, 1.0] + if jitter < NoJitter { + jitter = NoJitter + } + if jitter > MaxJitter { + jitter = MaxJitter + } + + //sleep = random_between(0, min(cap, base * 2 ** attempt)) + sleep := unit * time.Duration(1< cap { + sleep = cap + } + if jitter != NoJitter { + sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter) + } + return sleep + } + + go func() { + defer close(attemptCh) + for i := 0; i < maxRetry; i++ { + select { + // Attempts start from 1. + case attemptCh <- i + 1: + case <-doneCh: + // Stop the routine. + return + } + time.Sleep(exponentialBackoffWait(i)) + } + }() + return attemptCh +} + +// isNetErrorRetryable - is network error retryable. 
+func isNetErrorRetryable(err error) bool { + if err == nil { + return false + } + switch err.(type) { + case net.Error: + switch err.(type) { + case *net.DNSError, *net.OpError, net.UnknownNetworkError: + return true + case *url.Error: + // For a URL error, where it replies back "connection closed" + // retry again. + if strings.Contains(err.Error(), "Connection closed by foreign host") { + return true + } + default: + if strings.Contains(err.Error(), "net/http: TLS handshake timeout") { + // If error is - tlsHandshakeTimeoutError, retry. + return true + } else if strings.Contains(err.Error(), "i/o timeout") { + // If error is - tcp timeoutError, retry. + return true + } else if strings.Contains(err.Error(), "connection timed out") { + // If err is a net.Dial timeout, retry. + return true + } + } + } + return false +} + +// List of AWS S3 error codes which are retryable. +var retryableS3Codes = map[string]struct{}{ + "RequestError": {}, + "RequestTimeout": {}, + "Throttling": {}, + "ThrottlingException": {}, + "RequestLimitExceeded": {}, + "RequestThrottled": {}, + "InternalError": {}, + "ExpiredToken": {}, + "ExpiredTokenException": {}, + // Add more AWS S3 codes here. +} + +// isS3CodeRetryable - is s3 error code retryable. +func isS3CodeRetryable(s3Code string) (ok bool) { + _, ok = retryableS3Codes[s3Code] + return ok +} + +// List of HTTP status codes which are retryable. +var retryableHTTPStatusCodes = map[int]struct{}{ + 429: {}, // http.StatusTooManyRequests is not part of the Go 1.5 library, yet + http.StatusInternalServerError: {}, + http.StatusBadGateway: {}, + http.StatusServiceUnavailable: {}, + // Add more HTTP status codes here. +} + +// isHTTPStatusRetryable - is HTTP error code retryable. 
+func isHTTPStatusRetryable(httpStatusCode int) (ok bool) { + _, ok = retryableHTTPStatusCodes[httpStatusCode] + return ok +} diff --git a/vendor/github.com/minio/minio-go/s3-endpoints.go b/vendor/github.com/minio/minio-go/s3-endpoints.go index 8c9ff5e88..d7fa5e038 100644 --- a/vendor/github.com/minio/minio-go/s3-endpoints.go +++ b/vendor/github.com/minio/minio-go/s3-endpoints.go @@ -17,16 +17,23 @@ package minio // awsS3EndpointMap Amazon S3 endpoint map. +// "cn-north-1" adds support for AWS China. var awsS3EndpointMap = map[string]string{ "us-east-1": "s3.amazonaws.com", + "us-east-2": "s3-us-east-2.amazonaws.com", "us-west-2": "s3-us-west-2.amazonaws.com", "us-west-1": "s3-us-west-1.amazonaws.com", + "ca-central-1": "s3.ca-central-1.amazonaws.com", "eu-west-1": "s3-eu-west-1.amazonaws.com", + "eu-west-2": "s3-eu-west-2.amazonaws.com", "eu-central-1": "s3-eu-central-1.amazonaws.com", + "ap-south-1": "s3-ap-south-1.amazonaws.com", "ap-southeast-1": "s3-ap-southeast-1.amazonaws.com", + "ap-southeast-2": "s3-ap-southeast-2.amazonaws.com", "ap-northeast-1": "s3-ap-northeast-1.amazonaws.com", "ap-northeast-2": "s3-ap-northeast-2.amazonaws.com", "sa-east-1": "s3-sa-east-1.amazonaws.com", + "cn-north-1": "s3.cn-north-1.amazonaws.com.cn", } // getS3Endpoint get Amazon S3 endpoint based on the bucket location. diff --git a/vendor/github.com/minio/minio-go/s3-error.go b/vendor/github.com/minio/minio-go/s3-error.go new file mode 100644 index 000000000..11b40a0f8 --- /dev/null +++ b/vendor/github.com/minio/minio-go/s3-error.go @@ -0,0 +1,60 @@ +/* + * Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2017 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package minio + +// Non exhaustive list of AWS S3 standard error responses - +// http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html +var s3ErrorResponseMap = map[string]string{ + "AccessDenied": "Access Denied.", + "BadDigest": "The Content-Md5 you specified did not match what we received.", + "EntityTooSmall": "Your proposed upload is smaller than the minimum allowed object size.", + "EntityTooLarge": "Your proposed upload exceeds the maximum allowed object size.", + "IncompleteBody": "You did not provide the number of bytes specified by the Content-Length HTTP header.", + "InternalError": "We encountered an internal error, please try again.", + "InvalidAccessKeyID": "The access key ID you provided does not exist in our records.", + "InvalidBucketName": "The specified bucket is not valid.", + "InvalidDigest": "The Content-Md5 you specified is not valid.", + "InvalidRange": "The requested range is not satisfiable", + "MalformedXML": "The XML you provided was not well-formed or did not validate against our published schema.", + "MissingContentLength": "You must provide the Content-Length HTTP header.", + "MissingContentMD5": "Missing required header for this request: Content-Md5.", + "MissingRequestBodyError": "Request body is empty.", + "NoSuchBucket": "The specified bucket does not exist", + "NoSuchBucketPolicy": "The bucket policy does not exist", + "NoSuchKey": "The specified key does not exist.", + "NoSuchUpload": "The specified multipart upload does not exist. 
The upload ID may be invalid, or the upload may have been aborted or completed.", + "NotImplemented": "A header you provided implies functionality that is not implemented", + "PreconditionFailed": "At least one of the pre-conditions you specified did not hold", + "RequestTimeTooSkewed": "The difference between the request time and the server's time is too large.", + "SignatureDoesNotMatch": "The request signature we calculated does not match the signature you provided. Check your key and signing method.", + "MethodNotAllowed": "The specified method is not allowed against this resource.", + "InvalidPart": "One or more of the specified parts could not be found.", + "InvalidPartOrder": "The list of parts was not in ascending order. The parts list must be specified in order by part number.", + "InvalidObjectState": "The operation is not valid for the current state of the object.", + "AuthorizationHeaderMalformed": "The authorization header is malformed; the region is wrong.", + "MalformedPOSTRequest": "The body of your POST request is not well-formed multipart/form-data.", + "BucketNotEmpty": "The bucket you tried to delete is not empty", + "AllAccessDisabled": "All access to this bucket has been disabled.", + "MalformedPolicy": "Policy has invalid resource.", + "MissingFields": "Missing fields in request.", + "AuthorizationQueryParametersError": "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"/YYYYMMDD/REGION/SERVICE/aws4_request\".", + "MalformedDate": "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.", + "BucketAlreadyOwnedByYou": "Your previous request to create the named bucket succeeded and you already own it.", + "InvalidDuration": "Duration provided in the request is invalid.", + "XAmzContentSHA256Mismatch": "The provided 'x-amz-content-sha256' header does not match what was computed.", + // Add new API errors here. 
+} diff --git a/vendor/github.com/minio/minio-go/signature-type.go b/vendor/github.com/minio/minio-go/signature-type.go index cae74cd01..f9a57c3f1 100644 --- a/vendor/github.com/minio/minio-go/signature-type.go +++ b/vendor/github.com/minio/minio-go/signature-type.go @@ -24,8 +24,11 @@ const ( Latest SignatureType = iota SignatureV4 SignatureV2 + SignatureV4Streaming ) +var emptySHA256 = sum256(nil) + // isV2 - is signature SignatureV2? func (s SignatureType) isV2() bool { return s == SignatureV2 @@ -35,3 +38,8 @@ func (s SignatureType) isV2() bool { func (s SignatureType) isV4() bool { return s == SignatureV4 || s == Latest } + +// isStreamingV4 - is signature SignatureV4Streaming? +func (s SignatureType) isStreamingV4() bool { + return s == SignatureV4Streaming +} diff --git a/vendor/github.com/minio/minio-go/utils.go b/vendor/github.com/minio/minio-go/utils.go index 1636e8e04..93cd1712f 100644 --- a/vendor/github.com/minio/minio-go/utils.go +++ b/vendor/github.com/minio/minio-go/utils.go @@ -17,10 +17,8 @@ package minio import ( - "bytes" - "crypto/hmac" + "crypto/md5" "crypto/sha256" - "encoding/hex" "encoding/xml" "io" "io/ioutil" @@ -28,10 +26,11 @@ import ( "net/http" "net/url" "regexp" - "sort" "strings" "time" "unicode/utf8" + + "github.com/minio/minio-go/pkg/s3utils" ) // xmlDecoder provide decoded value in xml. @@ -47,33 +46,33 @@ func sum256(data []byte) []byte { return hash.Sum(nil) } -// sumHMAC calculate hmac between two input byte array. -func sumHMAC(key []byte, data []byte) []byte { - hash := hmac.New(sha256.New, key) +// sumMD5 calculate sumMD5 sum for an input byte array. +func sumMD5(data []byte) []byte { + hash := md5.New() hash.Write(data) return hash.Sum(nil) } // getEndpointURL - construct a new endpoint. 
-func getEndpointURL(endpoint string, inSecure bool) (*url.URL, error) { +func getEndpointURL(endpoint string, secure bool) (*url.URL, error) { if strings.Contains(endpoint, ":") { host, _, err := net.SplitHostPort(endpoint) if err != nil { return nil, err } - if !isValidIP(host) && !isValidDomain(host) { + if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) { msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards." return nil, ErrInvalidArgument(msg) } } else { - if !isValidIP(endpoint) && !isValidDomain(endpoint) { + if !s3utils.IsValidIP(endpoint) && !s3utils.IsValidDomain(endpoint) { msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards." return nil, ErrInvalidArgument(msg) } } - // if inSecure is true, use 'http' scheme. + // If secure is false, use 'http' scheme. scheme := "https" - if inSecure { + if !secure { scheme = "http" } @@ -85,45 +84,12 @@ func getEndpointURL(endpoint string, inSecure bool) (*url.URL, error) { } // Validate incoming endpoint URL. - if err := isValidEndpointURL(endpointURL); err != nil { + if err := isValidEndpointURL(*endpointURL); err != nil { return nil, err } return endpointURL, nil } -// isValidDomain validates if input string is a valid domain name. -func isValidDomain(host string) bool { - // See RFC 1035, RFC 3696. - host = strings.TrimSpace(host) - if len(host) == 0 || len(host) > 255 { - return false - } - // host cannot start or end with "-" - if host[len(host)-1:] == "-" || host[:1] == "-" { - return false - } - // host cannot start or end with "_" - if host[len(host)-1:] == "_" || host[:1] == "_" { - return false - } - // host cannot start or end with a "." - if host[len(host)-1:] == "." || host[:1] == "." { - return false - } - // All non alphanumeric characters are invalid. 
- if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:> 0 { - buf.WriteByte('&') - } - buf.WriteString(prefix) - buf.WriteString(urlEncodePath(v)) - } - } - return buf.String() + return h2 } -// urlEncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences -// -// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8 -// non english characters cannot be parsed due to the nature in which url.Encode() is written -// -// This function on the other hand is a direct replacement for url.Encode() technique to support -// pretty much every UTF-8 character. -func urlEncodePath(pathName string) string { - // if object matches reserved string, no need to encode them - reservedNames := regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$") - if reservedNames.MatchString(pathName) { - return pathName - } - var encodedPathname string - for _, s := range pathName { - if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark) - encodedPathname = encodedPathname + string(s) - continue - } - switch s { - case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark) - encodedPathname = encodedPathname + string(s) - continue - default: - len := utf8.RuneLen(s) - if len < 0 { - // if utf8 cannot convert return the same string as is - return pathName - } - u := make([]byte, len) - utf8.EncodeRune(u, s) - for _, r := range u { - hex := hex.EncodeToString([]byte{r}) - encodedPathname = encodedPathname + "%" + strings.ToUpper(hex) - } - } +// Filter relevant response headers from +// the HEAD, GET http response. The function takes +// a list of headers which are filtered out and +// returned as a new http header. +func filterHeader(header http.Header, filterKeys []string) (filteredHeader http.Header) { + filteredHeader = cloneHeader(header) + for _, key := range filterKeys { + filteredHeader.Del(key) } - return encodedPathname + return filteredHeader }