Update minio/minio-go to v2.0.4

This commit is contained in:
Alexander Neumann 2017-04-18 21:28:09 +02:00
parent 0befa06cd0
commit 1794bdc663
57 changed files with 2982 additions and 1567 deletions

vendor/manifest vendored
View File

@ -28,8 +28,8 @@
{
"importpath": "github.com/minio/minio-go",
"repository": "https://github.com/minio/minio-go",
"revision": "b1674741d196d5d79486d7c1645ed6ded902b712",
"branch": "master"
"revision": "dcaae9ec4d0b0a81d17f22f6d7a186491f6a55ec",
"branch": "HEAD"
},
{
"importpath": "github.com/pkg/errors",

View File

@ -1,5 +1,6 @@
# Minio Golang Library for Amazon S3 Compatible Cloud Storage [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/Minio/minio?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
The Minio Golang Client SDK provides simple APIs to access any Amazon S3 compatible object storage server.
# Minio Go Client SDK for Amazon S3 Compatible Cloud Storage [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io)
The Minio Go Client SDK provides simple APIs to access any Amazon S3 compatible object storage.
**Supported cloud storage providers:**
@ -14,22 +15,21 @@ The Minio Golang Client SDK provides simple APIs to access any Amazon S3 compati
- Ceph Object Gateway
- Riak CS
This quickstart guide will show you how to install the Minio client SDK, connect to Minio, and provide a walkthrough of a simple file uploader. For a complete list of APIs and examples, please take a look at the [Golang Client API Reference](https://docs.minio.io/docs/golang-client-api-reference).
This quickstart guide will show you how to install the Minio client SDK, connect to Minio, and provide a walkthrough for a simple file uploader. For a complete list of APIs and examples, please take a look at the [Go Client API Reference](https://docs.minio.io/docs/golang-client-api-reference).
This document assumes that you have a working [Golang setup](https://docs.minio.io/docs/how-to-install-golang).
This document assumes that you have a working [Go development environment](https://docs.minio.io/docs/how-to-install-golang).
## Download from Github
```sh
$ go get -u github.com/minio/minio-go
go get -u github.com/minio/minio-go
```
## Initialize Minio Client
You need four items to connect to Minio object storage server.
The Minio client requires the following four parameters to connect to an Amazon S3 compatible object storage.
| Parameter | Description|
@ -68,7 +68,7 @@ func main() {
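The initialization code elided around this hunk reduces to roughly the following sketch; the play.minio.io endpoint and its public demo credentials are assumed stand-in values here:

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	// The four parameters from the table above; endpoint and
	// credentials below are the public play server demo values.
	endpoint := "play.minio.io:9000"
	accessKeyID := "Q3AM3UQ867SPQQA43P2F"
	secretAccessKey := "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
	useSSL := true

	minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL)
	if err != nil {
		log.Fatalln(err)
	}
	_ = minioClient // the client is now ready for bucket and object calls
}
```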
## Quick Start Example - File Uploader
This example program connects to an object storage server, makes a bucket on the server and then uploads a file to the bucket.
This example program connects to an object storage server, creates a bucket and uploads a file to the bucket.
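Sketched against the client from the previous snippet, the uploader described here comes down to two calls; bucket name, region and file path are sample values matching the output below:

```go
// Continuing from the initialization sketch above.
bucketName := "mymusic"
if err := minioClient.MakeBucket(bucketName, "us-east-1"); err != nil {
	log.Fatalln(err)
}
log.Println("Successfully created", bucketName)

n, err := minioClient.FPutObject(bucketName, "golden-oldies.zip",
	"/tmp/golden-oldies.zip", "application/zip")
if err != nil {
	log.Fatalln(err)
}
log.Println("Successfully uploaded golden-oldies.zip of size", n)
```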
@ -132,11 +132,11 @@ func main() {
```sh
$ go run file-uploader.go
go run file-uploader.go
2016/08/13 17:03:28 Successfully created mymusic
2016/08/13 17:03:40 Successfully uploaded golden-oldies.zip of size 16253413
$ mc ls play/mymusic/
mc ls play/mymusic/
[2016-05-27 16:02:16 PDT] 17MiB golden-oldies.zip
```
@ -161,6 +161,7 @@ The full API Reference is available here.
* [`SetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#SetBucketPolicy)
* [`GetBucketPolicy`](https://docs.minio.io/docs/golang-client-api-reference#GetBucketPolicy)
* [`ListBucketPolicies`](https://docs.minio.io/docs/golang-client-api-reference#ListBucketPolicies)
### API Reference : Bucket notification Operations
@ -173,14 +174,15 @@ The full API Reference is available here.
* [`FPutObject`](https://docs.minio.io/docs/golang-client-api-reference#FPutObject)
* [`FGetObject`](https://docs.minio.io/docs/golang-client-api-reference#FGetObject)
* [`CopyObject`](https://docs.minio.io/docs/golang-client-api-reference#CopyObject)
### API Reference : Object Operations
* [`GetObject`](https://docs.minio.io/docs/golang-client-api-reference#GetObject)
* [`PutObject`](https://docs.minio.io/docs/golang-client-api-reference#PutObject)
* [`StatObject`](https://docs.minio.io/docs/golang-client-api-reference#StatObject)
* [`CopyObject`](https://docs.minio.io/docs/golang-client-api-reference#CopyObject)
* [`RemoveObject`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObject)
* [`RemoveObjects`](https://docs.minio.io/docs/golang-client-api-reference#RemoveObjects)
* [`RemoveIncompleteUpload`](https://docs.minio.io/docs/golang-client-api-reference#RemoveIncompleteUpload)
### API Reference : Presigned Operations
@ -189,44 +191,52 @@ The full API Reference is available here.
* [`PresignedPutObject`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPutObject)
* [`PresignedPostPolicy`](https://docs.minio.io/docs/golang-client-api-reference#PresignedPostPolicy)
### API Reference : Client custom settings
* [`SetAppInfo`](http://docs.minio.io/docs/golang-client-api-reference#SetAppInfo)
* [`SetCustomTransport`](http://docs.minio.io/docs/golang-client-api-reference#SetCustomTransport)
* [`TraceOn`](http://docs.minio.io/docs/golang-client-api-reference#TraceOn)
* [`TraceOff`](http://docs.minio.io/docs/golang-client-api-reference#TraceOff)
## Full Examples
#### Full Examples : Bucket Operations
* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go)
* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go)
* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go)
* [makebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/makebucket.go)
* [listbuckets.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbuckets.go)
* [bucketexists.go](https://github.com/minio/minio-go/blob/master/examples/s3/bucketexists.go)
* [removebucket.go](https://github.com/minio/minio-go/blob/master/examples/s3/removebucket.go)
* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go)
* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go)
* [listincompleteuploads.go](https://github.com/minio/minio-go/blob/master/examples/s3/listincompleteuploads.go)
#### Full Examples : Bucket policy Operations
* [setbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketpolicy.go)
* [getbucketpolicy.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketpolicy.go)
* [listbucketpolicies.go](https://github.com/minio/minio-go/blob/master/examples/s3/listbucketpolicies.go)
#### Full Examples : Bucket notification Operations
* [setbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/setbucketnotification.go)
* [getbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/getbucketnotification.go)
* [deletebucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/deletebucketnotification.go)
* [removeallbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeallbucketnotification.go)
* [listenbucketnotification.go](https://github.com/minio/minio-go/blob/master/examples/minio/listenbucketnotification.go) (Minio Extension)
#### Full Examples : File Object Operations
* [fputobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fputobject.go)
* [fgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/fgetobject.go)
* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go)
#### Full Examples : Object Operations
* [putobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/putobject.go)
* [getobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/getobject.go)
* [listobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjects.go)
* [listobjectsV2.go](https://github.com/minio/minio-go/blob/master/examples/s3/listobjectsV2.go)
* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go)
* [statobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/statobject.go)
* [copyobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/copyobject.go)
* [removeobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobject.go)
* [removeincompleteupload.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeincompleteupload.go)
* [removeobjects.go](https://github.com/minio/minio-go/blob/master/examples/s3/removeobjects.go)
#### Full Examples : Presigned Operations
* [presignedgetobject.go](https://github.com/minio/minio-go/blob/master/examples/s3/presignedgetobject.go)
@ -235,7 +245,7 @@ The full API Reference is available here.
## Explore Further
* [Complete Documentation](https://docs.minio.io)
* [Minio Golang Client SDK API Reference](https://docs.minio.io/docs/golang-client-api-reference)
* [Minio Go Client SDK API Reference](https://docs.minio.io/docs/golang-client-api-reference)
* [Go Music Player App- Full Application Example ](https://docs.minio.io/docs/go-music-player-app)
## Contribute

View File

@ -16,7 +16,10 @@
package minio
import "time"
import (
"net/http"
"time"
)
// BucketInfo container for bucket metadata.
type BucketInfo struct {
@ -38,6 +41,10 @@ type ObjectInfo struct {
Size int64 `json:"size"` // Size in bytes of the object.
ContentType string `json:"contentType"` // A standard MIME type describing the format of the object data.
// Collection of additional metadata on the object.
// eg: x-amz-meta-*, content-encoding etc.
Metadata http.Header `json:"metadata"`
// Owner name.
Owner struct {
DisplayName string `json:"name"`

View File

@ -149,6 +149,16 @@ func httpRespToErrorResponse(resp *http.Response, bucketName, objectName string)
return errResp
}
// ErrTransferAccelerationBucket - bucket name is invalid to be used with transfer acceleration.
func ErrTransferAccelerationBucket(bucketName string) error {
msg := fmt.Sprintf("The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods (\".\").")
return ErrorResponse{
Code: "InvalidArgument",
Message: msg,
BucketName: bucketName,
}
}
// ErrEntityTooLarge - Input size is larger than supported maximum.
func ErrEntityTooLarge(totalSize, maxObjectSize int64, bucketName, objectName string) error {
msg := fmt.Sprintf("Your proposed upload size %d exceeds the maximum allowed object size %d for single PUT operation.", totalSize, maxObjectSize)
@ -201,16 +211,6 @@ func ErrInvalidObjectName(message string) error {
}
}
// ErrInvalidParts - Invalid number of parts.
func ErrInvalidParts(expectedParts, uploadedParts int) error {
msg := fmt.Sprintf("Unexpected number of parts found Want %d, Got %d", expectedParts, uploadedParts)
return ErrorResponse{
Code: "InvalidParts",
Message: msg,
RequestID: "minio",
}
}
// ErrInvalidObjectPrefix - Invalid object prefix response is
// similar to object name response.
var ErrInvalidObjectPrefix = ErrInvalidObjectName

View File

@ -249,20 +249,6 @@ func TestErrInvalidObjectName(t *testing.T) {
}
}
// Test validates 'ErrInvalidParts' error response.
func TestErrInvalidParts(t *testing.T) {
msg := fmt.Sprintf("Unexpected number of parts found Want %d, Got %d", 10, 9)
expectedResult := ErrorResponse{
Code: "InvalidParts",
Message: msg,
RequestID: "minio",
}
actualResult := ErrInvalidParts(10, 9)
if !reflect.DeepEqual(expectedResult, actualResult) {
t.Errorf("Expected result to be '%+v', but instead got '%+v'", expectedResult, actualResult)
}
}
// Test validates 'ErrInvalidArgument' response.
func TestErrInvalidArgument(t *testing.T) {
expectedResult := ErrorResponse{

View File

@ -73,7 +73,9 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
if req.isReadAt {
// If this is a ReadAt request only get the specified range.
// Range is set with respect to the offset and length of the buffer requested.
httpReader, objectInfo, err = c.getObject(bucketName, objectName, req.Offset, int64(len(req.Buffer)))
// Do not set objectInfo from the first readAt request because it will not get
// the whole object.
httpReader, _, err = c.getObject(bucketName, objectName, req.Offset, int64(len(req.Buffer)))
} else {
// First request is a Read request.
httpReader, objectInfo, err = c.getObject(bucketName, objectName, req.Offset, 0)
@ -115,6 +117,19 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
objectInfo: objectInfo,
}
}
} else if req.settingObjectInfo { // Request is just to get objectInfo.
objectInfo, err := c.StatObject(bucketName, objectName)
if err != nil {
resCh <- getResponse{
Error: err,
}
// Exit the goroutine.
return
}
// Send back the objectInfo.
resCh <- getResponse{
objectInfo: objectInfo,
}
} else {
// Offset changes fetch the new object at an Offset.
// Because the httpReader may not be set by the first
@ -132,7 +147,7 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
// Range is set with respect to the offset and length of the buffer requested.
httpReader, _, err = c.getObject(bucketName, objectName, req.Offset, int64(len(req.Buffer)))
} else {
httpReader, _, err = c.getObject(bucketName, objectName, req.Offset, 0)
httpReader, objectInfo, err = c.getObject(bucketName, objectName, req.Offset, 0)
}
if err != nil {
resCh <- getResponse{
@ -152,9 +167,10 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
}
// Reply back how much was read.
resCh <- getResponse{
Size: int(size),
Error: err,
didRead: true,
Size: int(size),
Error: err,
didRead: true,
objectInfo: objectInfo,
}
}
}
@ -168,13 +184,14 @@ func (c Client) GetObject(bucketName, objectName string) (*Object, error) {
// get request message container to communicate with internal
// go-routine.
type getRequest struct {
Buffer []byte
Offset int64 // readAt offset.
DidOffsetChange bool // Tracks the offset changes for Seek requests.
beenRead bool // Determines if this is the first time an object is being read.
isReadAt bool // Determines if this request is a request to a specific range
isReadOp bool // Determines if this request is a Read or Read/At request.
isFirstReq bool // Determines if this request is the first time an object is being accessed.
Buffer []byte
Offset int64 // readAt offset.
DidOffsetChange bool // Tracks the offset changes for Seek requests.
beenRead bool // Determines if this is the first time an object is being read.
isReadAt bool // Determines if this request is a request to a specific range
isReadOp bool // Determines if this request is a Read or Read/At request.
isFirstReq bool // Determines if this request is the first time an object is being accessed.
settingObjectInfo bool // Determines if this request is to set the objectInfo of an object.
}
// get response message container to reply back for the request.
@ -195,10 +212,12 @@ type Object struct {
reqCh chan<- getRequest
resCh <-chan getResponse
doneCh chan<- struct{}
prevOffset int64
currOffset int64
objectInfo ObjectInfo
// Ask lower level to initiate data fetching based on currOffset
seekData bool
// Keeps track of closed call.
isClosed bool
@ -210,6 +229,9 @@ type Object struct {
// Keeps track of if this object has been read yet.
beenRead bool
// Keeps track of if objectInfo has been set yet.
objectInfoSet bool
}
// doGetRequest - sends and blocks on the firstReqCh and reqCh of an object.
@ -221,11 +243,15 @@ func (o *Object) doGetRequest(request getRequest) (getResponse, error) {
response := <-o.resCh
// This was the first request.
if !o.isStarted {
// Set objectInfo for first time.
o.objectInfo = response.objectInfo
// The object has been operated on.
o.isStarted = true
}
// Set the objectInfo if the request was not readAt
// and it hasn't been set before.
if !o.objectInfoSet && !request.isReadAt {
o.objectInfo = response.objectInfo
o.objectInfoSet = true
}
// Set beenRead only if it has not been set before.
if !o.beenRead {
o.beenRead = response.didRead
@ -235,6 +261,9 @@ func (o *Object) doGetRequest(request getRequest) (getResponse, error) {
return response, response.Error
}
// Data are ready on the wire, no need to reinitiate connection in lower level
o.seekData = false
return response, nil
}
@ -243,8 +272,6 @@ func (o *Object) doGetRequest(request getRequest) (getResponse, error) {
func (o *Object) setOffset(bytesRead int64) error {
// Update the currentOffset.
o.currOffset += bytesRead
// Save the current offset as previous offset.
o.prevOffset = o.currOffset
if o.currOffset >= o.objectInfo.Size {
return io.EOF
@ -252,7 +279,7 @@ func (o *Object) setOffset(bytesRead int64) error {
return nil
}
// Read reads up to len(p) bytes into p. It returns the number of
// Read reads up to len(b) bytes into b. It returns the number of
// bytes read (0 <= n <= len(b)) and any error encountered. Returns
// io.EOF upon end of file.
func (o *Object) Read(b []byte) (n int, err error) {
@ -280,27 +307,14 @@ func (o *Object) Read(b []byte) (n int, err error) {
readReq.isFirstReq = true
}
// Verify if offset has changed and currOffset is greater than
// previous offset. Perhaps due to Seek().
offsetChange := o.prevOffset - o.currOffset
if offsetChange < 0 {
offsetChange = -offsetChange
}
if offsetChange > 0 {
// Fetch the new reader at the current offset again.
readReq.Offset = o.currOffset
readReq.DidOffsetChange = true
} else {
// No offset changes no need to fetch new reader, continue
// reading.
readReq.DidOffsetChange = false
readReq.Offset = 0
}
// Ask to establish a new data fetch routine based on seekData flag
readReq.DidOffsetChange = o.seekData
readReq.Offset = o.currOffset
// Send and receive from the first request.
response, err := o.doGetRequest(readReq)
if err != nil {
// Save the error.
if err != nil && err != io.EOF {
// Save the error for future calls.
o.prevErr = err
return response.Size, err
}
@ -309,14 +323,18 @@ func (o *Object) Read(b []byte) (n int, err error) {
bytesRead := int64(response.Size)
// Set the new offset.
err = o.setOffset(bytesRead)
if err != nil {
return response.Size, err
oerr := o.setOffset(bytesRead)
if oerr != nil {
// Save the error for future calls.
o.prevErr = oerr
return response.Size, oerr
}
return response.Size, nil
// Return the response.
return response.Size, err
}
// Stat returns the ObjectInfo structure describing object.
// Stat returns the ObjectInfo structure describing Object.
func (o *Object) Stat() (ObjectInfo, error) {
if o == nil {
return ObjectInfo{}, ErrInvalidArgument("Object is nil")
@ -325,16 +343,15 @@ func (o *Object) Stat() (ObjectInfo, error) {
o.mutex.Lock()
defer o.mutex.Unlock()
if o.prevErr != nil || o.isClosed {
if o.prevErr != nil && o.prevErr != io.EOF || o.isClosed {
return ObjectInfo{}, o.prevErr
}
// This is the first request.
if !o.isStarted {
if !o.isStarted || !o.objectInfoSet {
statReq := getRequest{
isReadOp: false, // This is a Stat not a Read/ReadAt.
Offset: 0,
isFirstReq: true,
isFirstReq: !o.isStarted,
settingObjectInfo: !o.objectInfoSet,
}
// Send the request and get the response.
@ -365,8 +382,9 @@ func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {
if o.prevErr != nil || o.isClosed {
return 0, o.prevErr
}
// Can only compare offsets to size when size has been set.
if o.isStarted {
if o.objectInfoSet {
// If offset is negative then we return io.EOF.
// If offset is greater than or equal to object size we return io.EOF.
if offset >= o.objectInfo.Size || offset < 0 {
@ -383,6 +401,7 @@ func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {
Offset: offset, // Set the offset.
Buffer: b,
}
// Alert that this is the first request.
if !o.isStarted {
readAtReq.isFirstReq = true
@ -390,21 +409,29 @@ func (o *Object) ReadAt(b []byte, offset int64) (n int, err error) {
// Send and receive from the first request.
response, err := o.doGetRequest(readAtReq)
if err != nil {
if err != nil && err != io.EOF {
// Save the error.
o.prevErr = err
return 0, err
return response.Size, err
}
// Bytes read.
bytesRead := int64(response.Size)
// Update the offsets.
err = o.setOffset(bytesRead)
if err != nil {
return response.Size, err
// There is no valid objectInfo yet
// to compare against for EOF.
if !o.objectInfoSet {
// Update the currentOffset.
o.currOffset += bytesRead
} else {
// If this was not the first request update
// the offsets and compare against objectInfo
// for EOF.
oerr := o.setOffset(bytesRead)
if oerr != nil {
o.prevErr = oerr
return response.Size, oerr
}
}
return response.Size, nil
return response.Size, err
}
// Seek sets the offset for the next Read or Write to offset,
@ -439,7 +466,7 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
// This is the first request. So before anything else
// get the ObjectInfo.
if !o.isStarted {
if !o.isStarted || !o.objectInfoSet {
// Create the new Seek request.
seekReq := getRequest{
isReadOp: false,
@ -454,8 +481,6 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
return 0, err
}
}
// Save current offset as previous offset.
o.prevOffset = o.currOffset
// Switch through whence.
switch whence {
@ -489,6 +514,10 @@ func (o *Object) Seek(offset int64, whence int) (n int64, err error) {
if o.prevErr == io.EOF {
o.prevErr = nil
}
// Ask lower level to fetch again from source
o.seekData = true
// Return the effective offset.
return o.currOffset, nil
}
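From the caller's side, the reworked request flow behaves as in this sketch; bucket and object names are placeholders, and a connected client plus imports of `io` and `log` are assumed:

```go
func readAfterSeek(c *minio.Client) {
	obj, err := c.GetObject("mybucket", "myobject")
	if err != nil {
		log.Fatalln(err)
	}
	defer obj.Close()

	// Stat now works before any Read: it issues its own
	// settingObjectInfo request instead of relying on a prior read.
	info, err := obj.Stat()
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("size:", info.Size, "content-type:", info.Metadata.Get("Content-Type"))

	// Seek only records the new offset and raises seekData; the next
	// Read fetches from that offset instead of diffing prevOffset.
	if _, err = obj.Seek(info.Size/2, 0); err != nil {
		log.Fatalln(err)
	}
	buf := make([]byte, 1024)
	n, err := obj.Read(buf)
	if err != nil && err != io.EOF {
		log.Fatalln(err)
	}
	log.Println("read", n, "bytes after seeking")
}
```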

View File

@ -41,7 +41,23 @@ func (c Client) GetBucketPolicy(bucketName, objectPrefix string) (bucketPolicy p
return policy.GetPolicy(policyInfo.Statements, bucketName, objectPrefix), nil
}
// Request server for policy.
// ListBucketPolicies - list all policies for a given prefix and all its children.
func (c Client) ListBucketPolicies(bucketName, objectPrefix string) (bucketPolicies map[string]policy.BucketPolicy, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return map[string]policy.BucketPolicy{}, err
}
if err := isValidObjectPrefix(objectPrefix); err != nil {
return map[string]policy.BucketPolicy{}, err
}
policyInfo, err := c.getBucketPolicy(bucketName, objectPrefix)
if err != nil {
return map[string]policy.BucketPolicy{}, err
}
return policy.GetPolicies(policyInfo.Statements, bucketName), nil
}
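A usage sketch for the new call, assuming a connected client `c`; the bucket name is a placeholder:

```go
policies, err := c.ListBucketPolicies("mybucket", "")
if err != nil {
	log.Fatalln(err)
}
// Each entry maps a prefix such as "mybucket/downloads*" to a
// policy.BucketPolicy value like "readonly" or "readwrite".
for prefix, pol := range policies {
	log.Println(prefix, "=>", pol)
}
```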
// Request server for current bucket policy.
func (c Client) getBucketPolicy(bucketName string, objectPrefix string) (policy.BucketAccessPolicy, error) {
// Get resources properly escaped and lined up before
// using them in http request.

View File

@ -84,6 +84,8 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d
// If recursive we do not delimit.
delimiter = ""
}
// Return object owner information by default
fetchOwner := true
// Validate bucket name.
if err := isValidBucketName(bucketName); err != nil {
defer close(objectStatCh)
@ -108,7 +110,7 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d
var continuationToken string
for {
// Get list of objects a maximum of 1000 per request.
result, err := c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, delimiter, 1000)
result, err := c.listObjectsV2Query(bucketName, objectPrefix, continuationToken, fetchOwner, delimiter, 1000)
if err != nil {
objectStatCh <- ObjectInfo{
Err: err,
@ -166,7 +168,7 @@ func (c Client) ListObjectsV2(bucketName, objectPrefix string, recursive bool, d
// ?delimiter - A delimiter is a character you use to group keys.
// ?prefix - Limits the response to keys that begin with the specified prefix.
// ?max-keys - Sets the maximum number of keys returned in the response body.
func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken, delimiter string, maxkeys int) (listBucketV2Result, error) {
func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken string, fetchOwner bool, delimiter string, maxkeys int) (listBucketV2Result, error) {
// Validate bucket name.
if err := isValidBucketName(bucketName); err != nil {
return listBucketV2Result{}, err
@ -195,6 +197,11 @@ func (c Client) listObjectsV2Query(bucketName, objectPrefix, continuationToken,
urlValues.Set("delimiter", delimiter)
}
// Fetch owner when listing
if fetchOwner {
urlValues.Set("fetch-owner", "true")
}
// maxkeys should default to 1000 or less.
if maxkeys == 0 || maxkeys > 1000 {
maxkeys = 1000
@ -475,6 +482,7 @@ func (c Client) listIncompleteUploads(bucketName, objectPrefix string, recursive
objectMultipartStatCh <- ObjectMultipartInfo{
Err: err,
}
continue
}
}
select {
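With fetch-owner now always set, each ObjectInfo coming out of ListObjectsV2 carries owner details. A usage sketch with placeholder names, assuming a connected client `c`:

```go
doneCh := make(chan struct{})
defer close(doneCh)

// Recursive listing under a prefix; owner info is requested on
// every page via fetch-owner=true.
for object := range c.ListObjectsV2("mybucket", "photos/", true, doneCh) {
	if object.Err != nil {
		log.Fatalln(object.Err)
	}
	log.Println(object.Key, object.Size, object.Owner.DisplayName)
}
```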

View File

@ -22,6 +22,9 @@ import (
"io"
"net/http"
"net/url"
"time"
"github.com/minio/minio-go/pkg/s3utils"
)
// GetBucketNotification - get bucket notification at a given path.
@ -120,7 +123,7 @@ type NotificationInfo struct {
}
// ListenBucketNotification - listen on bucket notifications.
func (c Client) ListenBucketNotification(bucketName string, accountArn Arn, doneCh <-chan struct{}) <-chan NotificationInfo {
func (c Client) ListenBucketNotification(bucketName, prefix, suffix string, events []string, doneCh <-chan struct{}) <-chan NotificationInfo {
notificationInfoCh := make(chan NotificationInfo, 1)
// Only success, start a routine to start reading line by line.
go func(notificationInfoCh chan<- NotificationInfo) {
@ -135,7 +138,7 @@ func (c Client) ListenBucketNotification(bucketName string, accountArn Arn, done
}
// Check ARN partition to verify if listening bucket is supported
if accountArn.Partition != "minio" {
if s3utils.IsAmazonEndpoint(c.endpointURL) || s3utils.IsGoogleEndpoint(c.endpointURL) {
notificationInfoCh <- NotificationInfo{
Err: ErrAPINotSupported("Listening bucket notification is specific only to `minio` partitions"),
}
@ -143,9 +146,18 @@ func (c Client) ListenBucketNotification(bucketName string, accountArn Arn, done
}
// Continuously run and listen on bucket notification.
for {
// Create a done channel to control the retry loop goroutine.
retryDoneCh := make(chan struct{}, 1)
// Indicate to our routine to exit cleanly upon return.
defer close(retryDoneCh)
// Wait on the jitter retry loop.
for range c.newRetryTimerContinous(time.Second, time.Second*30, MaxJitter, retryDoneCh) {
urlValues := make(url.Values)
urlValues.Set("notificationARN", accountArn.String())
urlValues.Set("prefix", prefix)
urlValues.Set("suffix", suffix)
urlValues["events"] = events
// Execute GET on bucket to list objects.
resp, err := c.executeMethod("GET", requestMetadata{
@ -153,10 +165,7 @@ func (c Client) ListenBucketNotification(bucketName string, accountArn Arn, done
queryValues: urlValues,
})
if err != nil {
notificationInfoCh <- NotificationInfo{
Err: err,
}
return
continue
}
// Validate http response, upon error return quickly.
@ -178,10 +187,7 @@ func (c Client) ListenBucketNotification(bucketName string, accountArn Arn, done
for bio.Scan() {
var notificationInfo NotificationInfo
if err = json.Unmarshal(bio.Bytes(), &notificationInfo); err != nil {
notificationInfoCh <- NotificationInfo{
Err: err,
}
return
continue
}
// Send notifications on channel only if there are events received.
if len(notificationInfo.Records) > 0 {
@ -198,12 +204,7 @@ func (c Client) ListenBucketNotification(bucketName string, accountArn Arn, done
// and re-connect.
if err == io.ErrUnexpectedEOF {
resp.Body.Close()
continue
}
notificationInfoCh <- NotificationInfo{
Err: err,
}
return
}
}
}(notificationInfoCh)
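The ARN argument is gone: callers now pass a prefix, a suffix and an event list, and transient errors are retried with jitter instead of closing the channel. A sketch with placeholder values, assuming a connected client `c`:

```go
doneCh := make(chan struct{})
defer close(doneCh)

events := []string{"s3:ObjectCreated:*", "s3:ObjectRemoved:*"}
for info := range c.ListenBucketNotification("mybucket", "photos/", ".jpg", events, doneCh) {
	if info.Err != nil {
		log.Fatalln(info.Err)
	}
	for _, record := range info.Records {
		log.Println("event on object:", record.S3.Object.Key)
	}
}
```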

View File

@ -20,6 +20,9 @@ import (
"errors"
"net/url"
"time"
"github.com/minio/minio-go/pkg/s3signer"
"github.com/minio/minio-go/pkg/s3utils"
)
// supportedGetReqParams - supported request parameters for GET presigned request.
@ -126,14 +129,14 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str
policyBase64 := p.base64()
p.formData["policy"] = policyBase64
// For Google endpoint set this value to be 'GoogleAccessId'.
if isGoogleEndpoint(c.endpointURL) {
if s3utils.IsGoogleEndpoint(c.endpointURL) {
p.formData["GoogleAccessId"] = c.accessKeyID
} else {
// For all other endpoints set this value to be 'AWSAccessKeyId'.
p.formData["AWSAccessKeyId"] = c.accessKeyID
}
// Sign the policy.
p.formData["signature"] = postPresignSignatureV2(policyBase64, c.secretAccessKey)
p.formData["signature"] = s3signer.PostPresignSignatureV2(policyBase64, c.secretAccessKey)
return u, p.formData, nil
}
@ -156,7 +159,7 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str
}
// Add a credential policy.
credential := getCredential(c.accessKeyID, location, t)
credential := s3signer.GetCredential(c.accessKeyID, location, t)
if err = p.addNewPolicy(policyCondition{
matchType: "eq",
condition: "$x-amz-credential",
@ -172,6 +175,6 @@ func (c Client) PresignedPostPolicy(p *PostPolicy) (u *url.URL, formData map[str
p.formData["x-amz-algorithm"] = signV4Algorithm
p.formData["x-amz-credential"] = credential
p.formData["x-amz-date"] = t.Format(iso8601DateFormat)
p.formData["x-amz-signature"] = postPresignSignatureV4(policyBase64, t, c.secretAccessKey, location)
p.formData["x-amz-signature"] = s3signer.PostPresignSignatureV4(policyBase64, t, c.secretAccessKey, location)
return u, p.formData, nil
}
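A usage sketch for the V4 path above; the policy setters shown are the SDK's usual ones, and all values (plus the `time` import) are assumptions:

```go
p := minio.NewPostPolicy()
p.SetBucket("mybucket")
p.SetKey("myobject")
p.SetExpires(time.Now().UTC().Add(10 * time.Minute))

url, formData, err := c.PresignedPostPolicy(p)
if err != nil {
	log.Fatalln(err)
}
// POST a multipart form carrying formData plus the file to url.
log.Println("POST to", url, "with fields", formData)
```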

View File

@ -26,8 +26,10 @@ import (
"io/ioutil"
"net/http"
"net/url"
"path"
"github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio-go/pkg/s3signer"
)
/// Bucket operations
@ -89,11 +91,8 @@ func (c Client) makeBucketRequest(bucketName string, location string) (*http.Req
// is the preferred method here. The final location of the
// 'bucket' is provided through XML LocationConstraint data with
// the request.
targetURL, err := url.Parse(c.endpointURL)
if err != nil {
return nil, err
}
targetURL.Path = "/" + bucketName + "/"
targetURL := c.endpointURL
targetURL.Path = path.Join(bucketName, "") + "/"
// get a new HTTP request for the method.
req, err := http.NewRequest("PUT", targetURL.String(), nil)
@ -133,9 +132,9 @@ func (c Client) makeBucketRequest(bucketName string, location string) (*http.Req
if c.signature.isV4() {
// Signature calculated for MakeBucket request should be for 'us-east-1',
// regardless of the bucket's location constraint.
req = signV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
} else if c.signature.isV2() {
req = signV2(*req, c.accessKeyID, c.secretAccessKey)
req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey)
}
// Return signed request.

View File

@ -24,8 +24,10 @@ import (
"io"
"io/ioutil"
"net/http"
"net/url"
"path"
"testing"
"github.com/minio/minio-go/pkg/s3signer"
)
// Tests validate http request formulated for creation of bucket.
@ -33,14 +35,11 @@ func TestMakeBucketRequest(t *testing.T) {
// Generates expected http request for bucket creation.
// Used for asserting with the actual request generated.
createExpectedRequest := func(c *Client, bucketName string, location string, req *http.Request) (*http.Request, error) {
targetURL, err := url.Parse(c.endpointURL)
if err != nil {
return nil, err
}
targetURL.Path = "/" + bucketName + "/"
targetURL := c.endpointURL
targetURL.Path = path.Join(bucketName, "") + "/"
// get a new HTTP request for the method.
var err error
req, err = http.NewRequest("PUT", targetURL.String(), nil)
if err != nil {
return nil, err
@ -78,9 +77,9 @@ func TestMakeBucketRequest(t *testing.T) {
if c.signature.isV4() {
// Signature calculated for MakeBucket request should be for 'us-east-1',
// regardless of the bucket's location constraint.
req = signV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
} else if c.signature.isV2() {
req = signV2(*req, c.accessKeyID, c.secretAccessKey)
req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey)
}
// Return signed request.

View File

@ -44,18 +44,17 @@ func isReadAt(reader io.Reader) (ok bool) {
}
// shouldUploadPart - verify if part should be uploaded.
func shouldUploadPart(objPart objectPart, objectParts map[int]objectPart) bool {
func shouldUploadPart(objPart objectPart, uploadReq uploadPartReq) bool {
// If part not found should upload the part.
uploadedPart, found := objectParts[objPart.PartNumber]
if !found {
if uploadReq.Part == nil {
return true
}
// if size mismatches should upload the part.
if objPart.Size != uploadedPart.Size {
if objPart.Size != uploadReq.Part.Size {
return true
}
// if md5sum mismatches should upload the part.
if objPart.ETag != uploadedPart.ETag {
if objPart.ETag != uploadReq.Part.ETag {
return true
}
return false
@ -68,7 +67,7 @@ func shouldUploadPart(objPart objectPart, objectParts map[int]objectPart) bool {
// object storage it will have the following parameters as constants.
//
// maxPartsCount - 10000
// minPartSize - 5MiB
// minPartSize - 64MiB
// maxMultipartPutObjectSize - 5TiB
//
func optimalPartInfo(objectSize int64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) {
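The function body is elided by the hunk below; a worked sketch of how these constants plausibly combine (the usual even-split-then-round-up strategy, not necessarily the library's exact code; `math` import assumed). For a 5TiB object the even split over 10000 parts is about 550MB, which rounds up to 576MiB parts:

```go
// Sketch under the constants above; not the library's exact code.
const (
	sketchMaxPartsCount = 10000
	sketchMinPartSize   = 1024 * 1024 * 64 // 64MiB
)

func sketchPartSize(objectSize int64) int64 {
	// Start from an even split over the maximum part count...
	partSize := math.Ceil(float64(objectSize) / sketchMaxPartsCount)
	// ...then round up to a multiple of the 64MiB floor so that no
	// part (except possibly the last) falls below minPartSize.
	partSize = math.Ceil(partSize/sketchMinPartSize) * sketchMinPartSize
	return int64(partSize)
}
```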
@ -167,37 +166,64 @@ func hashCopyN(hashAlgorithms map[string]hash.Hash, hashSums map[string][]byte,
// getUploadID - fetch upload id if already present for an object name
// or initiate a new request to fetch a new upload id.
func (c Client) getUploadID(bucketName, objectName, contentType string) (uploadID string, isNew bool, err error) {
func (c Client) newUploadID(bucketName, objectName string, metaData map[string][]string) (uploadID string, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return "", false, err
return "", err
}
if err := isValidObjectName(objectName); err != nil {
return "", false, err
return "", err
}
// Set content Type to default if empty string.
if contentType == "" {
contentType = "application/octet-stream"
}
// Find upload id for previous upload for an object.
uploadID, err = c.findUploadID(bucketName, objectName)
// Initiate multipart upload for an object.
initMultipartUploadResult, err := c.initiateMultipartUpload(bucketName, objectName, metaData)
if err != nil {
return "", false, err
return "", err
}
return initMultipartUploadResult.UploadID, nil
}
// getMpartUploadSession returns the upload id and the uploaded parts to continue a previous upload session
// or initiate a new multipart session if no current one is found
func (c Client) getMpartUploadSession(bucketName, objectName string, metaData map[string][]string) (string, map[int]objectPart, error) {
// A map of all uploaded parts.
var partsInfo map[int]objectPart
var err error
uploadID, err := c.findUploadID(bucketName, objectName)
if err != nil {
return "", nil, err
}
if uploadID == "" {
// Initiate multipart upload for an object.
initMultipartUploadResult, err := c.initiateMultipartUpload(bucketName, objectName, contentType)
// Initiates a new multipart request
uploadID, err = c.newUploadID(bucketName, objectName, metaData)
if err != nil {
return "", false, err
return "", nil, err
}
} else {
// Fetch previously upload parts and maximum part size.
partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID)
if err != nil {
// When the server returns NoSuchUpload even though it previously acknowledged the existence of the upload id,
// initiate a new multipart upload
if respErr, ok := err.(ErrorResponse); ok && respErr.Code == "NoSuchUpload" {
uploadID, err = c.newUploadID(bucketName, objectName, metaData)
if err != nil {
return "", nil, err
}
} else {
return "", nil, err
}
}
// Save the new upload id.
uploadID = initMultipartUploadResult.UploadID
// Indicate that this is a new upload id.
isNew = true
}
return uploadID, isNew, nil
// Allocate partsInfo if not done yet
if partsInfo == nil {
partsInfo = make(map[int]objectPart)
}
return uploadID, partsInfo, nil
}
// computeHash - Calculates hashes for an input read Seeker.

View File

@ -16,7 +16,11 @@
package minio
import "net/http"
import (
"net/http"
"github.com/minio/minio-go/pkg/s3utils"
)
// CopyObject - copy a source object into a new object with the provided name in the provided bucket
func (c Client) CopyObject(bucketName string, objectName string, objectSource string, cpCond CopyConditions) error {
@ -38,7 +42,7 @@ func (c Client) CopyObject(bucketName string, objectName string, objectSource st
}
// Set copy source.
customHeaders.Set("x-amz-copy-source", urlEncodePath(objectSource))
customHeaders.Set("x-amz-copy-source", s3utils.EncodePath(objectSource))
// Execute PUT on objectName.
resp, err := c.executeMethod("PUT", requestMetadata{

View File

@ -28,6 +28,8 @@ import (
"os"
"path/filepath"
"sort"
"github.com/minio/minio-go/pkg/s3utils"
)
// FPutObject - Create an object in a bucket, with contents from file at filePath.
@ -62,6 +64,8 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
return 0, ErrEntityTooLarge(fileSize, maxMultipartPutObjectSize, bucketName, objectName)
}
objMetadata := make(map[string][]string)
// Set contentType based on filepath extension if not given or default
// value of "binary/octet-stream" if the extension has no associated type.
if contentType == "" {
@ -70,9 +74,11 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
}
}
objMetadata["Content-Type"] = []string{contentType}
// NOTE: Google Cloud Storage multipart Put is not compatible with Amazon S3 APIs.
// Current implementation will only upload a maximum of 5GiB to Google Cloud Storage servers.
if isGoogleEndpoint(c.endpointURL) {
if s3utils.IsGoogleEndpoint(c.endpointURL) {
if fileSize > int64(maxSinglePutObjectSize) {
return 0, ErrorResponse{
Code: "NotImplemented",
@ -82,11 +88,11 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
}
}
// Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size.
return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, contentType, nil)
return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
}
// NOTE: S3 doesn't allow anonymous multipart requests.
if isAmazonEndpoint(c.endpointURL) && c.anonymous {
if s3utils.IsAmazonEndpoint(c.endpointURL) && c.anonymous {
if fileSize > int64(maxSinglePutObjectSize) {
return 0, ErrorResponse{
Code: "NotImplemented",
@ -97,15 +103,15 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
}
// Do not compute MD5 for anonymous requests to Amazon
// S3. Uploads up to 5GiB in size.
return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, contentType, nil)
return c.putObjectNoChecksum(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
}
// Small object upload is initiated for input data smaller than minPartSize.
if fileSize < minPartSize && fileSize >= 0 {
return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, contentType, nil)
return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
}
// Upload all large objects as multipart.
n, err = c.putObjectMultipartFromFile(bucketName, objectName, fileReader, fileSize, contentType, nil)
n, err = c.putObjectMultipartFromFile(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
if err != nil {
errResp := ToErrorResponse(err)
// Verify if multipart functionality is not available, if not
@ -116,7 +122,7 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
return 0, ErrEntityTooLarge(fileSize, maxSinglePutObjectSize, bucketName, objectName)
}
// Fall back to uploading as single PutObject operation.
return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, contentType, nil)
return c.putObjectSingle(bucketName, objectName, fileReader, fileSize, objMetadata, nil)
}
return n, err
}
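The public surface is unchanged; the metadata map is now built internally from the content type. Usage sketch with placeholder values, assuming a connected client `c`:

```go
n, err := c.FPutObject("mybucket", "backup.tar.gz",
	"/tmp/backup.tar.gz", "application/gzip")
if err != nil {
	log.Fatalln(err)
}
log.Println("uploaded", n, "bytes")
```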
@ -131,7 +137,7 @@ func (c Client) FPutObject(bucketName, objectName, filePath, contentType string)
// against MD5SUM of each individual parts. This function also
// effectively utilizes file system capabilities of reading from
// specific sections and not having to create temporary files.
func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileReader io.ReaderAt, fileSize int64, contentType string, progress io.Reader) (int64, error) {
func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileReader io.ReaderAt, fileSize int64, metaData map[string][]string, progress io.Reader) (int64, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
@ -140,9 +146,8 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
return 0, err
}
// Get upload id for an object, initiates a new multipart request
// if it cannot find any previously partially uploaded object.
uploadID, isNew, err := c.getUploadID(bucketName, objectName, contentType)
// Get the upload id of a previously partially uploaded object or initiate a new multipart upload
uploadID, partsInfo, err := c.getMpartUploadSession(bucketName, objectName, metaData)
if err != nil {
return 0, err
}
@ -151,83 +156,139 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
var totalUploadedSize int64
// Complete multipart upload.
var completeMultipartUpload completeMultipartUpload
// A map of all uploaded parts.
var partsInfo = make(map[int]objectPart)
// If this session is a continuation of a previous session fetch all
// previously uploaded parts info.
if !isNew {
// Fetch previously upload parts and maximum part size.
partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID)
if err != nil {
return 0, err
}
}
var complMultipartUpload completeMultipartUpload
// Calculate the optimal parts info for a given size.
totalPartsCount, partSize, _, err := optimalPartInfo(fileSize)
totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(fileSize)
if err != nil {
return 0, err
}
// Part number always starts with '1'.
partNumber := 1
// Create a channel to communicate a part was uploaded.
// Buffer this to 10000, the maximum number of parts allowed by S3.
uploadedPartsCh := make(chan uploadedPartRes, 10000)
for partNumber <= totalPartsCount {
// Get a section reader on a particular offset.
sectionReader := io.NewSectionReader(fileReader, totalUploadedSize, partSize)
// Create a channel to communicate which part to upload.
// Buffer this to 10000, the maximum number of parts allowed by S3.
uploadPartsCh := make(chan uploadPartReq, 10000)
// Add hash algorithms that need to be calculated by computeHash()
// In case of a non-v4 signature or https connection, sha256 is not needed.
hashAlgos := make(map[string]hash.Hash)
hashSums := make(map[string][]byte)
hashAlgos["md5"] = md5.New()
if c.signature.isV4() && !c.secure {
hashAlgos["sha256"] = sha256.New()
}
// Just for readability.
lastPartNumber := totalPartsCount
var prtSize int64
prtSize, err = computeHash(hashAlgos, hashSums, sectionReader)
if err != nil {
return 0, err
}
var reader io.Reader
// Update progress reader appropriately to the latest offset
// as we read from the source.
reader = newHook(sectionReader, progress)
// Verify if part should be uploaded.
if shouldUploadPart(objectPart{
ETag: hex.EncodeToString(hashSums["md5"]),
PartNumber: partNumber,
Size: prtSize,
}, partsInfo) {
// Proceed to upload the part.
var objPart objectPart
objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber,
hashSums["md5"], hashSums["sha256"], prtSize)
if err != nil {
return totalUploadedSize, err
}
// Save successfully uploaded part metadata.
partsInfo[partNumber] = objPart
// Send each part through the partUploadCh to be uploaded.
for p := 1; p <= totalPartsCount; p++ {
part, ok := partsInfo[p]
if ok {
uploadPartsCh <- uploadPartReq{PartNum: p, Part: &part}
} else {
// Update the progress reader for the skipped part.
if progress != nil {
if _, err = io.CopyN(ioutil.Discard, progress, prtSize); err != nil {
return totalUploadedSize, err
uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil}
}
}
close(uploadPartsCh)
// Use three 'workers' to upload parts in parallel.
for w := 1; w <= 3; w++ {
go func() {
// Deal with each part as it comes through the channel.
for uploadReq := range uploadPartsCh {
// Add hash algorithms that need to be calculated by computeHash()
// In case of a non-v4 signature or https connection, sha256 is not needed.
hashAlgos := make(map[string]hash.Hash)
hashSums := make(map[string][]byte)
hashAlgos["md5"] = md5.New()
if c.signature.isV4() && !c.secure {
hashAlgos["sha256"] = sha256.New()
}
// If partNumber was not uploaded we calculate the missing
// part offset and size. For all other part numbers we
// calculate offset based on multiples of partSize.
readOffset := int64(uploadReq.PartNum-1) * partSize
missingPartSize := partSize
// As a special case if partNumber is lastPartNumber, we
// calculate the offset based on the last part size.
if uploadReq.PartNum == lastPartNumber {
readOffset = (fileSize - lastPartSize)
missingPartSize = lastPartSize
}
// Get a section reader on a particular offset.
sectionReader := io.NewSectionReader(fileReader, readOffset, missingPartSize)
var prtSize int64
var err error
prtSize, err = computeHash(hashAlgos, hashSums, sectionReader)
if err != nil {
uploadedPartsCh <- uploadedPartRes{
Error: err,
}
// Exit the goroutine.
return
}
// Create the part to be uploaded.
verifyObjPart := objectPart{
ETag: hex.EncodeToString(hashSums["md5"]),
PartNumber: uploadReq.PartNum,
Size: partSize,
}
// If this is the last part do not give it the full part size.
if uploadReq.PartNum == lastPartNumber {
verifyObjPart.Size = lastPartSize
}
// Verify if part should be uploaded.
if shouldUploadPart(verifyObjPart, uploadReq) {
// Proceed to upload the part.
var objPart objectPart
objPart, err = c.uploadPart(bucketName, objectName, uploadID, sectionReader, uploadReq.PartNum, hashSums["md5"], hashSums["sha256"], prtSize)
if err != nil {
uploadedPartsCh <- uploadedPartRes{
Error: err,
}
// Exit the goroutine.
return
}
// Save successfully uploaded part metadata.
uploadReq.Part = &objPart
}
// Return through the channel the part size.
uploadedPartsCh <- uploadedPartRes{
Size: verifyObjPart.Size,
PartNum: uploadReq.PartNum,
Part: uploadReq.Part,
Error: nil,
}
}
}()
}
// Retrieve each uploaded part once it is done.
for u := 1; u <= totalPartsCount; u++ {
uploadRes := <-uploadedPartsCh
if uploadRes.Error != nil {
return totalUploadedSize, uploadRes.Error
}
// Save successfully uploaded size.
totalUploadedSize += prtSize
// Increment part number.
partNumber++
// Retrieve each uploaded part and store it to be completed.
part := uploadRes.Part
if part == nil {
return totalUploadedSize, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum))
}
// Update the total uploaded size.
totalUploadedSize += uploadRes.Size
// Update the progress bar if there is one.
if progress != nil {
if _, err = io.CopyN(ioutil.Discard, progress, uploadRes.Size); err != nil {
return totalUploadedSize, err
}
}
// Store the part to be completed.
complMultipartUpload.Parts = append(complMultipartUpload.Parts, completePart{
ETag: part.ETag,
PartNumber: part.PartNumber,
})
}
// Verify if we uploaded all data.
@ -235,22 +296,9 @@ func (c Client) putObjectMultipartFromFile(bucketName, objectName string, fileRe
return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, fileSize, bucketName, objectName)
}
// Loop over uploaded parts to save them in a Parts array before completing the multipart request.
for _, part := range partsInfo {
var complPart completePart
complPart.ETag = part.ETag
complPart.PartNumber = part.PartNumber
completeMultipartUpload.Parts = append(completeMultipartUpload.Parts, complPart)
}
// Verify if totalPartsCount is not equal to total list of parts.
if totalPartsCount != len(completeMultipartUpload.Parts) {
return totalUploadedSize, ErrInvalidParts(partNumber, len(completeMultipartUpload.Parts))
}
// Sort all completed parts.
sort.Sort(completedParts(completeMultipartUpload.Parts))
_, err = c.completeMultipartUpload(bucketName, objectName, uploadID, completeMultipartUpload)
sort.Sort(completedParts(complMultipartUpload.Parts))
_, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
if err != nil {
return totalUploadedSize, err
}

View File

@ -22,6 +22,7 @@ import (
"crypto/sha256"
"encoding/hex"
"encoding/xml"
"fmt"
"hash"
"io"
"io/ioutil"
@ -44,11 +45,11 @@ import (
// If we exhaust all the known types, code proceeds to use stream as
// is where each part is re-downloaded, checksummed and verified
// before upload.
func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) {
func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
if size > 0 && size > minPartSize {
// Verify if reader is *os.File, then use file system functionalities.
if isFile(reader) {
return c.putObjectMultipartFromFile(bucketName, objectName, reader.(*os.File), size, contentType, progress)
return c.putObjectMultipartFromFile(bucketName, objectName, reader.(*os.File), size, metaData, progress)
}
// Verify if reader is *minio.Object or io.ReaderAt.
// NOTE: Verification of object is kept for a specific purpose
@ -57,17 +58,17 @@ func (c Client) putObjectMultipart(bucketName, objectName string, reader io.Read
// and such a functionality is used in the subsequent code
// path.
if isObject(reader) || isReadAt(reader) {
return c.putObjectMultipartFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, contentType, progress)
return c.putObjectMultipartFromReadAt(bucketName, objectName, reader.(io.ReaderAt), size, metaData, progress)
}
}
// For any other data size and reader type we do generic multipart
// approach by staging data in temporary files and uploading them.
return c.putObjectMultipartStream(bucketName, objectName, reader, size, contentType, progress)
return c.putObjectMultipartStream(bucketName, objectName, reader, size, metaData, progress)
}
// putObjectStream uploads files bigger than 5MiB, and also supports
// putObjectStream uploads files bigger than 64MiB, and also supports
// special case where size is unknown i.e '-1'.
func (c Client) putObjectMultipartStream(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) {
func (c Client) putObjectMultipartStream(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
@ -82,26 +83,12 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
// Complete multipart upload.
var complMultipartUpload completeMultipartUpload
// A map of all previously uploaded parts.
var partsInfo = make(map[int]objectPart)
// getUploadID for an object, initiates a new multipart request
// if it cannot find any previously partially uploaded object.
uploadID, isNew, err := c.getUploadID(bucketName, objectName, contentType)
// Get the upload id of a previously partially uploaded object or initiate a new multipart upload
uploadID, partsInfo, err := c.getMpartUploadSession(bucketName, objectName, metaData)
if err != nil {
return 0, err
}
// If This session is a continuation of a previous session fetch all
// previously uploaded parts info.
if !isNew {
// Fetch previously uploaded parts and maximum part size.
partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID)
if err != nil {
return 0, err
}
}
// Calculate the optimal parts info for a given size.
totalPartsCount, partSize, _, err := optimalPartInfo(size)
if err != nil {
@ -115,7 +102,6 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
tmpBuffer := new(bytes.Buffer)
for partNumber <= totalPartsCount {
// Choose hash algorithms to be calculated by hashCopyN, avoid sha256
// with non-v4 signature request or HTTPS connection
hashSums := make(map[string][]byte)
@ -138,12 +124,14 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
// as we read from the source.
reader = newHook(tmpBuffer, progress)
part, ok := partsInfo[partNumber]
// Verify if part should be uploaded.
if shouldUploadPart(objectPart{
if !ok || shouldUploadPart(objectPart{
ETag: hex.EncodeToString(hashSums["md5"]),
PartNumber: partNumber,
Size: prtSize,
}, partsInfo) {
}, uploadPartReq{PartNum: partNumber, Part: &part}) {
// Proceed to upload the part.
var objPart objectPart
objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, hashSums["md5"], hashSums["sha256"], prtSize)
@ -169,14 +157,14 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
// Save successfully uploaded size.
totalUploadedSize += prtSize
// Increment part number.
partNumber++
// For unknown size, Read EOF we break away.
// We do not have to upload till totalPartsCount.
if size < 0 && rErr == io.EOF {
break
}
// Increment part number.
partNumber++
}
// Verify if we uploaded all the data.
@ -186,19 +174,17 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
}
}
// Loop over uploaded parts to save them in a Parts array before completing the multipart request.
for _, part := range partsInfo {
var complPart completePart
complPart.ETag = part.ETag
complPart.PartNumber = part.PartNumber
complMultipartUpload.Parts = append(complMultipartUpload.Parts, complPart)
}
if size > 0 {
// Verify if totalPartsCount is not equal to total list of parts.
if totalPartsCount != len(complMultipartUpload.Parts) {
return totalUploadedSize, ErrInvalidParts(partNumber, len(complMultipartUpload.Parts))
// Loop over total uploaded parts to save them in
// Parts array before completing the multipart request.
for i := 1; i < partNumber; i++ {
part, ok := partsInfo[i]
if !ok {
return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", i))
}
complMultipartUpload.Parts = append(complMultipartUpload.Parts, completePart{
ETag: part.ETag,
PartNumber: part.PartNumber,
})
}
// Sort all completed parts.
@ -213,7 +199,7 @@ func (c Client) putObjectMultipartStream(bucketName, objectName string, reader i
}
// initiateMultipartUpload - Initiates a multipart upload and returns an upload ID.
func (c Client) initiateMultipartUpload(bucketName, objectName, contentType string) (initiateMultipartUploadResult, error) {
func (c Client) initiateMultipartUpload(bucketName, objectName string, metaData map[string][]string) (initiateMultipartUploadResult, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return initiateMultipartUploadResult{}, err
@ -226,13 +212,18 @@ func (c Client) initiateMultipartUpload(bucketName, objectName, contentType stri
urlValues := make(url.Values)
urlValues.Set("uploads", "")
if contentType == "" {
contentType = "application/octet-stream"
}
// Set ContentType header.
customHeader := make(http.Header)
customHeader.Set("Content-Type", contentType)
for k, v := range metaData {
if len(v) > 0 {
customHeader.Set(k, v[0])
}
}
// Set a default content-type header if the latter is not provided
if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 {
customHeader.Set("Content-Type", "application/octet-stream")
}
reqMetadata := requestMetadata{
bucketName: bucketName,

View File

@ -16,10 +16,22 @@
package minio
import "io"
import (
"io"
"strings"
// PutObjectWithProgress - With progress.
"github.com/minio/minio-go/pkg/s3utils"
)
// PutObjectWithProgress - with progress.
func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.Reader, contentType string, progress io.Reader) (n int64, err error) {
metaData := make(map[string][]string)
metaData["Content-Type"] = []string{contentType}
return c.PutObjectWithMetadata(bucketName, objectName, reader, metaData, progress)
}
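PutObjectWithProgress is now a thin wrapper over the new metadata entry point; calling that entry point directly looks like this sketch (names and header values are placeholders, `data` is any io.Reader, client `c` assumed connected):

```go
metaData := map[string][]string{
	"Content-Type":      {"application/octet-stream"},
	"x-amz-meta-review": {"approved"}, // user metadata travels as x-amz-meta-*
}
n, err := c.PutObjectWithMetadata("mybucket", "myobject", data, metaData, nil)
if err != nil {
	log.Fatalln(err)
}
log.Println("uploaded", n, "bytes with metadata")
```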
// PutObjectWithMetadata - with metadata.
func (c Client) PutObjectWithMetadata(bucketName, objectName string, reader io.Reader, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
@ -47,7 +59,7 @@ func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.R
// NOTE: Google Cloud Storage does not implement Amazon S3 Compatible multipart PUT.
// So we fall back to single PUT operation with the maximum limit of 5GiB.
if isGoogleEndpoint(c.endpointURL) {
if s3utils.IsGoogleEndpoint(c.endpointURL) {
if size <= -1 {
return 0, ErrorResponse{
Code: "NotImplemented",
@ -60,11 +72,11 @@ func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.R
return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
}
// Do not compute MD5 for Google Cloud Storage. Uploads up to 5GiB in size.
return c.putObjectNoChecksum(bucketName, objectName, reader, size, contentType, progress)
return c.putObjectNoChecksum(bucketName, objectName, reader, size, metaData, progress)
}
// NOTE: S3 doesn't allow anonymous multipart requests.
if isAmazonEndpoint(c.endpointURL) && c.anonymous {
if s3utils.IsAmazonEndpoint(c.endpointURL) && c.anonymous {
if size <= -1 {
return 0, ErrorResponse{
Code: "NotImplemented",
@ -78,26 +90,26 @@ func (c Client) PutObjectWithProgress(bucketName, objectName string, reader io.R
}
// Do not compute MD5 for anonymous requests to Amazon
// S3. Uploads up to 5GiB in size.
return c.putObjectNoChecksum(bucketName, objectName, reader, size, contentType, progress)
return c.putObjectNoChecksum(bucketName, objectName, reader, size, metaData, progress)
}
// Upload objects smaller than the minimum part size with a single PUT.
if size < minPartSize && size >= 0 {
return c.putObjectSingle(bucketName, objectName, reader, size, contentType, progress)
return c.putObjectSingle(bucketName, objectName, reader, size, metaData, progress)
}
// For all sizes greater than 5MiB do multipart.
n, err = c.putObjectMultipart(bucketName, objectName, reader, size, contentType, progress)
n, err = c.putObjectMultipart(bucketName, objectName, reader, size, metaData, progress)
if err != nil {
errResp := ToErrorResponse(err)
// If multipart functionality is not available, fall back to a
// single PutObject operation.
if errResp.Code == "AccessDenied" && errResp.Message == "Access Denied." {
if errResp.Code == "AccessDenied" && strings.Contains(errResp.Message, "Access Denied") {
// Verify if size of reader is greater than '5GiB'.
if size > maxSinglePutObjectSize {
return 0, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
}
// Fall back to uploading as single PutObject operation.
return c.putObjectSingle(bucketName, objectName, reader, size, contentType, progress)
return c.putObjectSingle(bucketName, objectName, reader, size, metaData, progress)
}
return n, err
}
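A minimal usage sketch of the new PutObjectWithMetadata entry point (endpoint and credentials are placeholders, and the X-Amz-Meta-Reviewed key is a hypothetical user metadata header):

```go
package main

import (
	"log"
	"strings"

	"github.com/minio/minio-go"
)

func main() {
	c, err := minio.New("play.minio.io:9000", "YOUR-ACCESS", "YOUR-SECRET", true)
	if err != nil {
		log.Fatalln(err)
	}
	// Content-Type travels through the same metadata map as user metadata.
	metaData := map[string][]string{
		"Content-Type":        {"text/plain"},
		"X-Amz-Meta-Reviewed": {"true"}, // hypothetical custom header
	}
	n, err := c.PutObjectWithMetadata("my-bucketname", "my-objectname",
		strings.NewReader("hello metadata"), metaData, nil)
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("Uploaded", n, "bytes")
}
```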

View File

@ -20,21 +20,34 @@ import (
"bytes"
"crypto/md5"
"crypto/sha256"
"fmt"
"hash"
"io"
"io/ioutil"
"sort"
)
// uploadedPartRes - the response received from a part upload.
type uploadedPartRes struct {
Error error // Any error encountered while uploading the part.
PartNum int // Number of the part uploaded.
Size int64 // Size of the part uploaded.
Part *objectPart
}
// uploadPartReq - container for a part upload request.
type uploadPartReq struct {
PartNum int // Number of the part uploaded.
Part *objectPart // Part info, nil if the part was not previously uploaded.
}
// shouldUploadPartReadAt - verify if part should be uploaded.
func shouldUploadPartReadAt(objPart objectPart, objectParts map[int]objectPart) bool {
func shouldUploadPartReadAt(objPart objectPart, uploadReq uploadPartReq) bool {
// If the part was not found, it should be uploaded.
uploadedPart, found := objectParts[objPart.PartNumber]
if !found {
if uploadReq.Part == nil {
return true
}
// If the size mismatches, the part should be uploaded.
if uploadedPart.Size != objPart.Size {
if uploadReq.Part.Size != objPart.Size {
return true
}
return false
@ -50,7 +63,7 @@ func shouldUploadPartReadAt(objPart objectPart, objectParts map[int]objectPart)
// temporary files for staging all the data, these temporary files are
// cleaned automatically when the caller, i.e. the HTTP client, closes the
// stream after uploading all the contents successfully.
func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, reader io.ReaderAt, size int64, contentType string, progress io.Reader) (n int64, err error) {
func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, reader io.ReaderAt, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
@ -59,9 +72,8 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
return 0, err
}
// Get upload id for an object, initiates a new multipart request
// if it cannot find any previously partially uploaded object.
uploadID, isNew, err := c.getUploadID(bucketName, objectName, contentType)
// Get the upload id of a previously partially uploaded object or initiate a new multipart upload
uploadID, partsInfo, err := c.getMpartUploadSession(bucketName, objectName, metaData)
if err != nil {
return 0, err
}
@ -72,127 +84,150 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
// Complete multipart upload.
var complMultipartUpload completeMultipartUpload
// A map of all uploaded parts.
var partsInfo = make(map[int]objectPart)
// Fetch all parts info previously uploaded.
if !isNew {
partsInfo, err = c.listObjectParts(bucketName, objectName, uploadID)
if err != nil {
return 0, err
}
}
// Calculate the optimal parts info for a given size.
totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(size)
if err != nil {
return 0, err
}
// Used for readability, lastPartNumber is always
// totalPartsCount.
// Used for readability, lastPartNumber is always totalPartsCount.
lastPartNumber := totalPartsCount
// partNumber always starts with '1'.
partNumber := 1
// Declare a channel that sends the next part number to be uploaded.
// Buffered to 10000 because that's the maximum number of parts allowed
// by S3.
uploadPartsCh := make(chan uploadPartReq, 10000)
// Initialize a temporary buffer.
tmpBuffer := new(bytes.Buffer)
// Declare a channel that sends back the response of a part upload.
// Buffered to 10000 because that's the maximum number of parts allowed
// by S3.
uploadedPartsCh := make(chan uploadedPartRes, 10000)
// Read defaults to reading at 5MiB buffer.
readAtBuffer := make([]byte, optimalReadBufferSize)
// Upload all the missing parts.
for partNumber <= lastPartNumber {
// Verify object if its uploaded.
verifyObjPart := objectPart{
PartNumber: partNumber,
Size: partSize,
}
// Special case if we see a last part number, save last part
// size as the proper part size.
if partNumber == lastPartNumber {
verifyObjPart = objectPart{
PartNumber: lastPartNumber,
Size: lastPartSize,
}
// Send each part number to the channel to be processed.
for p := 1; p <= totalPartsCount; p++ {
part, ok := partsInfo[p]
if ok {
uploadPartsCh <- uploadPartReq{PartNum: p, Part: &part}
} else {
uploadPartsCh <- uploadPartReq{PartNum: p, Part: nil}
}
}
close(uploadPartsCh)
// Verify if part should be uploaded.
if !shouldUploadPartReadAt(verifyObjPart, partsInfo) {
// Increment part number when not uploaded.
partNumber++
if progress != nil {
// Update the progress reader for the skipped part.
if _, err = io.CopyN(ioutil.Discard, progress, verifyObjPart.Size); err != nil {
return 0, err
// Receive each part number from the channel allowing three parallel uploads.
for w := 1; w <= 3; w++ {
go func() {
// Read defaults to reading at 5MiB buffer.
readAtBuffer := make([]byte, optimalReadBufferSize)
// Each worker will draw from the part channel and upload in parallel.
for uploadReq := range uploadPartsCh {
// Declare a new tmpBuffer.
tmpBuffer := new(bytes.Buffer)
// If partNumber was not uploaded we calculate the missing
// part offset and size. For all other part numbers we
// calculate offset based on multiples of partSize.
readOffset := int64(uploadReq.PartNum-1) * partSize
missingPartSize := partSize
// As a special case if partNumber is lastPartNumber, we
// calculate the offset based on the last part size.
if uploadReq.PartNum == lastPartNumber {
readOffset = (size - lastPartSize)
missingPartSize = lastPartSize
}
// Get a section reader on a particular offset.
sectionReader := io.NewSectionReader(reader, readOffset, missingPartSize)
// Choose the needed hash algorithms to be calculated by hashCopyBuffer.
// Sha256 is avoided in non-v4 signature requests or HTTPS connections
hashSums := make(map[string][]byte)
hashAlgos := make(map[string]hash.Hash)
hashAlgos["md5"] = md5.New()
if c.signature.isV4() && !c.secure {
hashAlgos["sha256"] = sha256.New()
}
var prtSize int64
var err error
prtSize, err = hashCopyBuffer(hashAlgos, hashSums, tmpBuffer, sectionReader, readAtBuffer)
if err != nil {
// Send the error back through the channel.
uploadedPartsCh <- uploadedPartRes{
Size: 0,
Error: err,
}
// Exit the goroutine.
return
}
// Verify if the object part was already uploaded.
verifyObjPart := objectPart{
PartNumber: uploadReq.PartNum,
Size: partSize,
}
// Special case if we see a last part number, save last part
// size as the proper part size.
if uploadReq.PartNum == lastPartNumber {
verifyObjPart.Size = lastPartSize
}
// Only upload the necessary parts. Otherwise return size through channel
// to update any progress bar.
if shouldUploadPartReadAt(verifyObjPart, uploadReq) {
// Proceed to upload the part.
var objPart objectPart
objPart, err = c.uploadPart(bucketName, objectName, uploadID, tmpBuffer, uploadReq.PartNum, hashSums["md5"], hashSums["sha256"], prtSize)
if err != nil {
uploadedPartsCh <- uploadedPartRes{
Size: 0,
Error: err,
}
// Exit the goroutine.
return
}
// Save successfully uploaded part metadata.
uploadReq.Part = &objPart
}
// Send successful part info through the channel.
uploadedPartsCh <- uploadedPartRes{
Size: verifyObjPart.Size,
PartNum: uploadReq.PartNum,
Part: uploadReq.Part,
Error: nil,
}
}
continue
}
// If partNumber was not uploaded we calculate the missing
// part offset and size. For all other part numbers we
// calculate offset based on multiples of partSize.
readOffset := int64(partNumber-1) * partSize
missingPartSize := partSize
// As a special case if partNumber is lastPartNumber, we
// calculate the offset based on the last part size.
if partNumber == lastPartNumber {
readOffset = (size - lastPartSize)
missingPartSize = lastPartSize
}
// Get a section reader on a particular offset.
sectionReader := io.NewSectionReader(reader, readOffset, missingPartSize)
// Choose the needed hash algorithms to be calculated by hashCopyBuffer.
// Sha256 is avoided in non-v4 signature requests or HTTPS connections
hashSums := make(map[string][]byte)
hashAlgos := make(map[string]hash.Hash)
hashAlgos["md5"] = md5.New()
if c.signature.isV4() && !c.secure {
hashAlgos["sha256"] = sha256.New()
}
var prtSize int64
prtSize, err = hashCopyBuffer(hashAlgos, hashSums, tmpBuffer, sectionReader, readAtBuffer)
if err != nil {
return 0, err
}
var reader io.Reader
// Update progress reader appropriately to the latest offset
// as we read from the source.
reader = newHook(tmpBuffer, progress)
// Proceed to upload the part.
var objPart objectPart
objPart, err = c.uploadPart(bucketName, objectName, uploadID, reader, partNumber, hashSums["md5"], hashSums["sha256"], prtSize)
if err != nil {
// Reset the buffer upon any error.
tmpBuffer.Reset()
return 0, err
}
// Save successfully uploaded part metadata.
partsInfo[partNumber] = objPart
// Increment part number here after successful part upload.
partNumber++
// Reset the buffer.
tmpBuffer.Reset()
}()
}
// Loop over uploaded parts to save them in a Parts array before completing the multipart request.
for _, part := range partsInfo {
var complPart completePart
complPart.ETag = part.ETag
complPart.PartNumber = part.PartNumber
totalUploadedSize += part.Size
complMultipartUpload.Parts = append(complMultipartUpload.Parts, complPart)
// Gather the responses as they occur and update any
// progress bar.
for u := 1; u <= totalPartsCount; u++ {
uploadRes := <-uploadedPartsCh
if uploadRes.Error != nil {
return totalUploadedSize, uploadRes.Error
}
// Retrieve each uploaded part and store it to be completed.
part := uploadRes.Part
if part == nil {
return 0, ErrInvalidArgument(fmt.Sprintf("Missing part number %d", uploadRes.PartNum))
}
// Update the totalUploadedSize.
totalUploadedSize += uploadRes.Size
// Update the progress bar if there is one.
if progress != nil {
if _, err = io.CopyN(ioutil.Discard, progress, uploadRes.Size); err != nil {
return totalUploadedSize, err
}
}
// Store the parts to be completed in order.
complMultipartUpload.Parts = append(complMultipartUpload.Parts, completePart{
ETag: part.ETag,
PartNumber: part.PartNumber,
})
}
// Verify if we uploaded all the data.
@ -200,11 +235,6 @@ func (c Client) putObjectMultipartFromReadAt(bucketName, objectName string, read
return totalUploadedSize, ErrUnexpectedEOF(totalUploadedSize, size, bucketName, objectName)
}
// Verify if totalPartsCount is not equal to total list of parts.
if totalPartsCount != len(complMultipartUpload.Parts) {
return totalUploadedSize, ErrInvalidParts(totalPartsCount, len(complMultipartUpload.Parts))
}
// Sort all completed parts.
sort.Sort(completedParts(complMultipartUpload.Parts))
_, err = c.completeMultipartUpload(bucketName, objectName, uploadID, complMultipartUpload)
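The upload loop above is a classic fan-out/fan-in: a buffered request channel drained by a fixed pool of three workers, with results gathered back on a response channel. The same shape in isolation, as a runnable sketch with illustrative names:

```go
package main

import "fmt"

type result struct {
	partNum int
	err     error
}

func main() {
	const totalParts, workers = 7, 3
	reqCh := make(chan int, totalParts)
	resCh := make(chan result, totalParts)

	// Queue every part number, then close so workers terminate.
	for p := 1; p <= totalParts; p++ {
		reqCh <- p
	}
	close(reqCh)

	// Fixed worker pool draining the request channel in parallel.
	for w := 0; w < workers; w++ {
		go func() {
			for p := range reqCh {
				resCh <- result{partNum: p} // the real code uploads part p here
			}
		}()
	}

	// Gather exactly one result per part, failing fast on the first error.
	for i := 0; i < totalParts; i++ {
		r := <-resCh
		if r.err != nil {
			fmt.Println("part failed:", r.err)
			return
		}
		fmt.Println("part done:", r.partNum)
	}
}
```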

View File

@ -103,11 +103,10 @@ func getReaderSize(reader io.Reader) (size int64, err error) {
// implement Seekable calls. Ignore them and treat
// them like a stream with unknown length.
switch st.Name() {
case "stdin":
fallthrough
case "stdout":
fallthrough
case "stderr":
case "stdin", "stdout", "stderr":
return
// Ignore read/write stream of os.Pipe() which have unknown length too.
case "|0", "|1":
return
}
size = st.Size()
@ -151,7 +150,7 @@ func (c Client) PutObject(bucketName, objectName string, reader io.Reader, conte
// putObjectNoChecksum is a special function used for Google Cloud Storage,
// since Google's multipart API is not S3 compatible.
func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) {
func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
@ -169,7 +168,7 @@ func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Rea
// This function does not calculate sha256 and md5sum for payload.
// Execute put object.
st, err := c.putObjectDo(bucketName, objectName, readSeeker, nil, nil, size, contentType)
st, err := c.putObjectDo(bucketName, objectName, readSeeker, nil, nil, size, metaData)
if err != nil {
return 0, err
}
@ -181,7 +180,7 @@ func (c Client) putObjectNoChecksum(bucketName, objectName string, reader io.Rea
// putObjectSingle is a special function for uploading a single PutObject request.
// It is used as a fallback when a multipart upload fails.
func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader, size int64, contentType string, progress io.Reader) (n int64, err error) {
func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader, size int64, metaData map[string][]string, progress io.Reader) (n int64, err error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return 0, err
@ -221,6 +220,9 @@ func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader,
}
defer tmpFile.Close()
size, err = hashCopyN(hashAlgos, hashSums, tmpFile, reader, size)
if err != nil {
return 0, err
}
// Seek back to beginning of the temporary file.
if _, err = tmpFile.Seek(0, 0); err != nil {
return 0, err
@ -234,7 +236,7 @@ func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader,
}
}
// Execute put object.
st, err := c.putObjectDo(bucketName, objectName, reader, hashSums["md5"], hashSums["sha256"], size, contentType)
st, err := c.putObjectDo(bucketName, objectName, reader, hashSums["md5"], hashSums["sha256"], size, metaData)
if err != nil {
return 0, err
}
@ -252,7 +254,7 @@ func (c Client) putObjectSingle(bucketName, objectName string, reader io.Reader,
// putObjectDo - executes the put object http operation.
// NOTE: You must have WRITE permissions on a bucket to add an object to it.
func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, contentType string) (ObjectInfo, error) {
func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5Sum []byte, sha256Sum []byte, size int64, metaData map[string][]string) (ObjectInfo, error) {
// Input validation.
if err := isValidBucketName(bucketName); err != nil {
return ObjectInfo{}, err
@ -269,13 +271,20 @@ func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5
return ObjectInfo{}, ErrEntityTooLarge(size, maxSinglePutObjectSize, bucketName, objectName)
}
if strings.TrimSpace(contentType) == "" {
contentType = "application/octet-stream"
}
// Set headers.
customHeader := make(http.Header)
customHeader.Set("Content-Type", contentType)
// Set metadata to headers
for k, v := range metaData {
if len(v) > 0 {
customHeader.Set(k, v[0])
}
}
// If Content-Type is not provided, set the default application/octet-stream one
if v, ok := metaData["Content-Type"]; !ok || len(v) == 0 {
customHeader.Set("Content-Type", "application/octet-stream")
}
// Populate request metadata.
reqMetadata := requestMetadata{
@ -300,13 +309,13 @@ func (c Client) putObjectDo(bucketName, objectName string, reader io.Reader, md5
}
}
var metadata ObjectInfo
var objInfo ObjectInfo
// Trim off the odd double quotes from ETag in the beginning and end.
metadata.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
metadata.ETag = strings.TrimSuffix(metadata.ETag, "\"")
objInfo.ETag = strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
objInfo.ETag = strings.TrimSuffix(objInfo.ETag, "\"")
// A success here means data was written to server successfully.
metadata.Size = size
objInfo.Size = size
// Return here.
return metadata, nil
return objInfo, nil
}

View File

@ -17,6 +17,9 @@
package minio
import (
"bytes"
"encoding/xml"
"io"
"net/http"
"net/url"
)
@ -68,12 +71,142 @@ func (c Client) RemoveObject(bucketName, objectName string) error {
if err != nil {
return err
}
if resp != nil {
// if some unexpected error happened and max retry is reached, we want to let the client know
if resp.StatusCode != http.StatusNoContent {
return httpRespToErrorResponse(resp, bucketName, objectName)
}
}
// DeleteObject always responds with http '204' even for
// objects which do not exist. So no need to handle them
// specifically.
return nil
}
// RemoveObjectError - container of Multi Delete S3 API error
type RemoveObjectError struct {
ObjectName string
Err error
}
// generateRemoveMultiObjectsRequest - generate the XML request body for the Multi-Object Delete API
func generateRemoveMultiObjectsRequest(objects []string) []byte {
rmObjects := []deleteObject{}
for _, obj := range objects {
rmObjects = append(rmObjects, deleteObject{Key: obj})
}
xmlBytes, _ := xml.Marshal(deleteMultiObjects{Objects: rmObjects, Quiet: true})
return xmlBytes
}
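For two keys, the request body marshaled by generateRemoveMultiObjectsRequest comes out as below; this sketch mirrors the unexported XML types defined further down in this commit:

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// Local mirrors of the unexported request types, for illustration only.
type deleteObject struct {
	Key       string
	VersionID string `xml:"VersionId,omitempty"`
}

type deleteMultiObjects struct {
	XMLName xml.Name `xml:"Delete"`
	Quiet   bool
	Objects []deleteObject `xml:"Object"`
}

func main() {
	body, _ := xml.Marshal(deleteMultiObjects{
		Quiet:   true,
		Objects: []deleteObject{{Key: "a.txt"}, {Key: "b.txt"}},
	})
	fmt.Println(string(body))
	// <Delete><Quiet>true</Quiet><Object><Key>a.txt</Key></Object><Object><Key>b.txt</Key></Object></Delete>
}
```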
// processRemoveMultiObjectsResponse - parse the Multi-Object Delete API response
// and send the failure result for each object that could not be removed
func processRemoveMultiObjectsResponse(body io.Reader, objects []string, errorCh chan<- RemoveObjectError) {
// Parse multi delete XML response
rmResult := &deleteMultiObjectsResult{}
err := xmlDecoder(body, rmResult)
if err != nil {
errorCh <- RemoveObjectError{ObjectName: "", Err: err}
return
}
// Report each deletion that returned an error.
for _, obj := range rmResult.UnDeletedObjects {
errorCh <- RemoveObjectError{
ObjectName: obj.Key,
Err: ErrorResponse{
Code: obj.Code,
Message: obj.Message,
},
}
}
}
// RemoveObjects removes multiple objects from a bucket.
// The list of objects to remove is received from objectsCh.
// Removal failures are sent back via the returned error channel.
func (c Client) RemoveObjects(bucketName string, objectsCh <-chan string) <-chan RemoveObjectError {
errorCh := make(chan RemoveObjectError, 1)
// Validate if bucket name is valid.
if err := isValidBucketName(bucketName); err != nil {
defer close(errorCh)
errorCh <- RemoveObjectError{
Err: err,
}
return errorCh
}
// Validate objects channel to be properly allocated.
if objectsCh == nil {
defer close(errorCh)
errorCh <- RemoveObjectError{
Err: ErrInvalidArgument("Objects channel cannot be nil"),
}
return errorCh
}
// Generate and call MultiDelete S3 requests based on entries received from objectsCh
go func(errorCh chan<- RemoveObjectError) {
maxEntries := 1000
finish := false
urlValues := make(url.Values)
urlValues.Set("delete", "")
// Close error channel when Multi delete finishes.
defer close(errorCh)
// Loop over entries by 1000 and call MultiDelete requests
for {
if finish {
break
}
count := 0
var batch []string
// Try to gather 1000 entries
for object := range objectsCh {
batch = append(batch, object)
if count++; count >= maxEntries {
break
}
}
if count == 0 {
// Multi Objects Delete API doesn't accept an empty object list, quit immediately
break
}
if count < maxEntries {
// We didn't have 1000 entries, so this is the last batch
finish = true
}
// Generate remove multi objects XML request
removeBytes := generateRemoveMultiObjectsRequest(batch)
// Execute POST on bucket to remove objects.
resp, err := c.executeMethod("POST", requestMetadata{
bucketName: bucketName,
queryValues: urlValues,
contentBody: bytes.NewReader(removeBytes),
contentLength: int64(len(removeBytes)),
contentMD5Bytes: sumMD5(removeBytes),
contentSHA256Bytes: sum256(removeBytes),
})
if err != nil {
for _, b := range batch {
errorCh <- RemoveObjectError{ObjectName: b, Err: err}
}
continue
}
// Process multiobjects remove xml response
processRemoveMultiObjectsResponse(resp.Body, batch, errorCh)
closeResponse(resp)
}
}(errorCh)
return errorCh
}
// RemoveIncompleteUpload aborts a partially uploaded object.
// Requires explicit authentication, no anonymous requests are allowed for multipart API.
func (c Client) RemoveIncompleteUpload(bucketName, objectName string) error {

View File

@ -206,3 +206,39 @@ type createBucketConfiguration struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CreateBucketConfiguration" json:"-"`
Location string `xml:"LocationConstraint"`
}
// deleteObject container for Delete element in MultiObjects Delete XML request
type deleteObject struct {
Key string
VersionID string `xml:"VersionId,omitempty"`
}
// deletedObject container for Deleted element in MultiObjects Delete XML response
type deletedObject struct {
Key string
VersionID string `xml:"VersionId,omitempty"`
// These fields are ignored.
DeleteMarker bool
DeleteMarkerVersionID string
}
// nonDeletedObject container for Error element (failed deletion) in MultiObjects Delete XML response
type nonDeletedObject struct {
Key string
Code string
Message string
}
// deleteMultiObjects container for MultiObjects Delete XML request
type deleteMultiObjects struct {
XMLName xml.Name `xml:"Delete"`
Quiet bool
Objects []deleteObject `xml:"Object"`
}
// deleteMultiObjectsResult container for MultiObjects Delete XML response
type deleteMultiObjectsResult struct {
XMLName xml.Name `xml:"DeleteResult"`
DeletedObjects []deletedObject `xml:"Deleted"`
UnDeletedObjects []nonDeletedObject `xml:"Error"`
}

View File

@ -21,6 +21,8 @@ import (
"strconv"
"strings"
"time"
"github.com/minio/minio-go/pkg/s3utils"
)
// BucketExists verifies if a bucket exists and you have permission to access it.
@ -49,6 +51,31 @@ func (c Client) BucketExists(bucketName string) (bool, error) {
return true, nil
}
// List of header keys to be filtered, usually
// from all S3 API http responses.
var defaultFilterKeys = []string{
"Transfer-Encoding",
"Accept-Ranges",
"Date",
"Server",
"Vary",
"x-amz-request-id",
"x-amz-id-2",
// Add new headers to be ignored.
}
// Extract only the object metadata header key/values by
// filtering out a standard list of response header keys.
func extractObjMetadata(header http.Header) http.Header {
filterKeys := append([]string{
"ETag",
"Content-Length",
"Last-Modified",
"Content-Type",
}, defaultFilterKeys...)
return filterHeader(header, filterKeys)
}
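filterHeader itself is not part of this diff; assuming it simply returns a copy of the header without the listed keys, the extraction behaves like this sketch (the X-Amz-Meta-Reviewed key is hypothetical):

```go
package main

import (
	"fmt"
	"net/http"
)

// Assumed behavior of filterHeader: a copy of h without the given keys.
func filterHeader(h http.Header, keys []string) http.Header {
	out := make(http.Header, len(h))
	for k, v := range h {
		out[k] = v
	}
	for _, k := range keys {
		out.Del(k)
	}
	return out
}

func main() {
	h := http.Header{}
	h.Set("ETag", `"abc"`)
	h.Set("Server", "MinIO")
	h.Set("X-Amz-Meta-Reviewed", "true")
	// Only the user metadata survives the filtering.
	fmt.Println(filterHeader(h, []string{"ETag", "Server"}))
}
```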
// StatObject verifies if an object exists and you have permission to access it.
func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
// Input validation.
@ -78,17 +105,21 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
md5sum := strings.TrimPrefix(resp.Header.Get("ETag"), "\"")
md5sum = strings.TrimSuffix(md5sum, "\"")
// Parse content length.
size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
if err != nil {
return ObjectInfo{}, ErrorResponse{
Code: "InternalError",
Message: "Content-Length is invalid. " + reportIssue,
BucketName: bucketName,
Key: objectName,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
Region: resp.Header.Get("x-amz-bucket-region"),
// Content-Length is not valid for Google Cloud Storage, do not verify.
var size int64 = -1
if !s3utils.IsGoogleEndpoint(c.endpointURL) {
// Parse content length.
size, err = strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
if err != nil {
return ObjectInfo{}, ErrorResponse{
Code: "InternalError",
Message: "Content-Length is invalid. " + reportIssue,
BucketName: bucketName,
Key: objectName,
RequestID: resp.Header.Get("x-amz-request-id"),
HostID: resp.Header.Get("x-amz-id-2"),
Region: resp.Header.Get("x-amz-bucket-region"),
}
}
}
// Parse Last-Modified, which has the HTTP time format.
@ -109,12 +140,19 @@ func (c Client) StatObject(bucketName, objectName string) (ObjectInfo, error) {
if contentType == "" {
contentType = "application/octet-stream"
}
// Extract only the relevant header keys describing the object.
// The following function filters out a standard set of keys
// which are not part of the object metadata.
metadata := extractObjMetadata(resp.Header)
// Save object metadata info.
var objectStat ObjectInfo
objectStat.ETag = md5sum
objectStat.Key = objectName
objectStat.Size = size
objectStat.LastModified = date
objectStat.ContentType = contentType
return objectStat, nil
return ObjectInfo{
ETag: md5sum,
Key: objectName,
Size: size,
LastModified: date,
ContentType: contentType,
Metadata: metadata,
}, nil
}
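With the new Metadata field populated, callers can read the filtered headers straight off the StatObject result — a minimal sketch (placeholder endpoint and credentials):

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	c, err := minio.New("play.minio.io:9000", "YOUR-ACCESS", "YOUR-SECRET", true)
	if err != nil {
		log.Fatalln(err)
	}
	st, err := c.StatObject("my-bucketname", "my-objectname")
	if err != nil {
		log.Fatalln(err)
	}
	// Filtered response headers, e.g. X-Amz-Meta-* user metadata.
	for k, v := range st.Metadata {
		log.Println(k, "=", v)
	}
}
```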

View File

@ -33,12 +33,18 @@ import (
"strings"
"sync"
"time"
"github.com/minio/minio-go/pkg/s3signer"
"github.com/minio/minio-go/pkg/s3utils"
)
// Client implements Amazon S3 compatible methods.
type Client struct {
/// Standard options.
// Parsed endpoint url provided by the user.
endpointURL url.URL
// AccessKeyID required for authorized requests.
accessKeyID string
// SecretAccessKey required for authorized requests.
@ -53,7 +59,6 @@ type Client struct {
appName string
appVersion string
}
endpointURL string
// Indicate whether we are using https or not
secure bool
@ -66,6 +71,9 @@ type Client struct {
isTraceEnabled bool
traceOutput io.Writer
// S3 specific accelerated endpoint.
s3AccelerateEndpoint string
// Random seed.
random *rand.Rand
}
@ -73,7 +81,7 @@ type Client struct {
// Global constants.
const (
libraryName = "minio-go"
libraryVersion = "2.0.1"
libraryVersion = "2.0.4"
)
// User Agent should always follow the below style.
@ -116,13 +124,12 @@ func New(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*Cl
if err != nil {
return nil, err
}
// Google cloud storage should be set to signature V2, force it if
// not.
if isGoogleEndpoint(clnt.endpointURL) {
// Google cloud storage should be set to signature V2, force it if not.
if s3utils.IsGoogleEndpoint(clnt.endpointURL) {
clnt.signature = SignatureV2
}
// If Amazon S3, set to signature V4.
if isAmazonEndpoint(clnt.endpointURL) {
if s3utils.IsAmazonEndpoint(clnt.endpointURL) {
clnt.signature = SignatureV4
}
return clnt, nil
@ -151,6 +158,18 @@ func (r *lockedRandSource) Seed(seed int64) {
r.lk.Unlock()
}
// redirectHeaders copies all headers when following a redirect URL.
// This won't be needed anymore from go 1.8 (https://github.com/golang/go/issues/4800)
func redirectHeaders(req *http.Request, via []*http.Request) error {
if len(via) == 0 {
return nil
}
for key, val := range via[0].Header {
req.Header[key] = val
}
return nil
}
func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Client, error) {
// construct endpoint.
endpointURL, err := getEndpointURL(endpoint, secure)
@ -170,11 +189,12 @@ func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Cl
clnt.secure = secure
// Save endpoint URL, user agent for future uses.
clnt.endpointURL = endpointURL.String()
clnt.endpointURL = *endpointURL
// Instantiate http client and bucket location cache.
clnt.httpClient = &http.Client{
Transport: http.DefaultTransport,
Transport: http.DefaultTransport,
CheckRedirect: redirectHeaders,
}
// Instantiate bucket location cache.
@ -189,8 +209,7 @@ func privateNew(endpoint, accessKeyID, secretAccessKey string, secure bool) (*Cl
// SetAppInfo - add application details to user agent.
func (c *Client) SetAppInfo(appName string, appVersion string) {
// if app name and version is not set, we do not a new user
// agent.
// if app name and version not set, we do not set a new user agent.
if appName != "" && appVersion != "" {
c.appInfo = struct {
appName string
@ -241,8 +260,18 @@ func (c *Client) TraceOff() {
c.isTraceEnabled = false
}
// requestMetadata - is container for all the values to make a
// request.
// SetS3TransferAccelerate - turns the S3 accelerated endpoint on or off for all your
// requests. This feature is specific to S3; for all other endpoints this
// function does nothing. To read further details on S3 transfer acceleration
// please visit -
// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
func (c *Client) SetS3TransferAccelerate(accelerateEndpoint string) {
if s3utils.IsAmazonEndpoint(c.endpointURL) {
c.s3AccelerateEndpoint = accelerateEndpoint
}
}
// requestMetadata - is container for all the values to make a request.
type requestMetadata struct {
// If set newRequest presigns the URL.
presignURL bool
@ -262,6 +291,12 @@ type requestMetadata struct {
contentMD5Bytes []byte
}
// regCred matches credential string in HTTP header
var regCred = regexp.MustCompile("Credential=([A-Z0-9]+)/")
// regSign matches signature string in HTTP header
var regSign = regexp.MustCompile("Signature=([0-9a-f]+)")
// Filter out signature value from Authorization header.
func (c Client) filterSignature(req *http.Request) {
// For anonymous requests, no need to filter.
@ -281,11 +316,9 @@ func (c Client) filterSignature(req *http.Request) {
origAuth := req.Header.Get("Authorization")
// Strip out accessKeyID from:
// Credential=<access-key-id>/<date>/<aws-region>/<aws-service>/aws4_request
regCred := regexp.MustCompile("Credential=([A-Z0-9]+)/")
newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/")
// Strip out 256-bit signature from: Signature=<256-bit signature>
regSign := regexp.MustCompile("Signature=([[0-9a-f]+)")
newAuth = regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**")
// Set a temporary redacted auth
@ -364,20 +397,35 @@ func (c Client) dumpHTTP(req *http.Request, resp *http.Response) error {
// do - execute http request.
func (c Client) do(req *http.Request) (*http.Response, error) {
// do the request.
resp, err := c.httpClient.Do(req)
if err != nil {
// Handle this specifically for now until future Golang
// versions fix this issue properly.
urlErr, ok := err.(*url.Error)
if ok && strings.Contains(urlErr.Err.Error(), "EOF") {
return nil, &url.Error{
Op: urlErr.Op,
URL: urlErr.URL,
Err: fmt.Errorf("Connection closed by foreign host %s. Retry again.", urlErr.URL),
var resp *http.Response
var err error
// Do the request in a loop in case an HTTP 307 is returned, since Go still doesn't
// handle this situation properly (https://github.com/golang/go/issues/7912)
for {
resp, err = c.httpClient.Do(req)
if err != nil {
// Handle this specifically for now until future Golang
// versions fix this issue properly.
urlErr, ok := err.(*url.Error)
if ok && strings.Contains(urlErr.Err.Error(), "EOF") {
return nil, &url.Error{
Op: urlErr.Op,
URL: urlErr.URL,
Err: fmt.Errorf("Connection closed by foreign host %s. Retry again.", urlErr.URL),
}
}
return nil, err
}
// Redo the request with the new redirect URL if HTTP 307 is returned, quit the loop otherwise
if resp != nil && resp.StatusCode == http.StatusTemporaryRedirect {
newURL, err := url.Parse(resp.Header.Get("Location"))
if err != nil {
break
}
req.URL = newURL
} else {
break
}
return nil, err
}
// Response cannot be nil, report an error if that is the case.
@ -467,6 +515,8 @@ func (c Client) executeMethod(method string, metadata requestMetadata) (res *htt
// Read the body to be saved later.
errBodyBytes, err := ioutil.ReadAll(res.Body)
// res.Body should be closed
closeResponse(res)
if err != nil {
return nil, err
}
@ -512,7 +562,7 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
// Default all requests to "us-east-1" or "cn-north-1" (china region)
location := "us-east-1"
if isAmazonChinaEndpoint(c.endpointURL) {
if s3utils.IsAmazonChinaEndpoint(c.endpointURL) {
// For china specifically we need to set everything to
// cn-north-1 for now, there is no easier way until AWS S3
// provides a cleaner compatible API across "us-east-1" and
@ -550,10 +600,10 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
}
if c.signature.isV2() {
// Presign URL with signature v2.
req = preSignV2(*req, c.accessKeyID, c.secretAccessKey, metadata.expires)
req = s3signer.PreSignV2(*req, c.accessKeyID, c.secretAccessKey, metadata.expires)
} else {
// Presign URL with signature v4.
req = preSignV4(*req, c.accessKeyID, c.secretAccessKey, location, metadata.expires)
req = s3signer.PreSignV4(*req, c.accessKeyID, c.secretAccessKey, location, metadata.expires)
}
return req, nil
}
@ -563,10 +613,10 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
req.Body = ioutil.NopCloser(metadata.contentBody)
}
// FIXEM: Enable this when Google Cloud Storage properly supports 100-continue.
// FIXME: Enable this when Google Cloud Storage properly supports 100-continue.
// Skip setting 'expect' header for Google Cloud Storage, there
// are some known issues - https://github.com/restic/restic/issues/520
if !isGoogleEndpoint(c.endpointURL) {
if !s3utils.IsGoogleEndpoint(c.endpointURL) && c.s3AccelerateEndpoint == "" {
// Set 'Expect' header for the request.
req.Header.Set("Expect", "100-continue")
}
@ -610,10 +660,10 @@ func (c Client) newRequest(method string, metadata requestMetadata) (req *http.R
if !c.anonymous {
if c.signature.isV2() {
// Add signature version '2' authorization header.
req = signV2(*req, c.accessKeyID, c.secretAccessKey)
req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey)
} else if c.signature.isV4() {
// Add signature version '4' authorization header.
req = signV4(*req, c.accessKeyID, c.secretAccessKey, location)
req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, location)
}
}
@ -631,26 +681,34 @@ func (c Client) setUserAgent(req *http.Request) {
// makeTargetURL makes a new target URL.
func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, queryValues url.Values) (*url.URL, error) {
// Save host.
url, err := url.Parse(c.endpointURL)
if err != nil {
return nil, err
}
host := url.Host
host := c.endpointURL.Host
// For Amazon S3 endpoint, try to fetch location based endpoint.
if isAmazonEndpoint(c.endpointURL) {
// Fetch new host based on the bucket location.
host = getS3Endpoint(bucketLocation)
if s3utils.IsAmazonEndpoint(c.endpointURL) {
if c.s3AccelerateEndpoint != "" && bucketName != "" {
// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
// Disable transfer acceleration for non-compliant bucket names.
if strings.Contains(bucketName, ".") {
return nil, ErrTransferAccelerationBucket(bucketName)
}
// If transfer acceleration is requested set new host.
// For more details about enabling transfer acceleration read here.
// http://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
host = c.s3AccelerateEndpoint
} else {
// Fetch new host based on the bucket location.
host = getS3Endpoint(bucketLocation)
}
}
// Save scheme.
scheme := url.Scheme
scheme := c.endpointURL.Scheme
urlStr := scheme + "://" + host + "/"
// Make URL only if bucketName is available, otherwise use the
// endpoint URL.
if bucketName != "" {
// Record whether the target URL will have buckets which support virtual host style.
isVirtualHostStyle := isVirtualHostSupported(c.endpointURL, bucketName)
isVirtualHostStyle := s3utils.IsVirtualHostSupported(c.endpointURL, bucketName)
// If endpoint supports virtual host style use that always.
// Currently only S3 and Google Cloud Storage would support
@ -658,19 +716,19 @@ func (c Client) makeTargetURL(bucketName, objectName, bucketLocation string, que
if isVirtualHostStyle {
urlStr = scheme + "://" + bucketName + "." + host + "/"
if objectName != "" {
urlStr = urlStr + urlEncodePath(objectName)
urlStr = urlStr + s3utils.EncodePath(objectName)
}
} else {
// If not fall back to using path style.
urlStr = urlStr + bucketName + "/"
if objectName != "" {
urlStr = urlStr + urlEncodePath(objectName)
urlStr = urlStr + s3utils.EncodePath(objectName)
}
}
}
// If there are any query values, add them to the end.
if len(queryValues) > 0 {
urlStr = urlStr + "?" + queryEncode(queryValues)
urlStr = urlStr + "?" + s3utils.QueryEncode(queryValues)
}
u, err := url.Parse(urlStr)
if err != nil {

View File

@ -18,7 +18,6 @@ package minio
import (
"bytes"
crand "crypto/rand"
"errors"
"io"
"io/ioutil"
@ -43,10 +42,10 @@ func TestMakeBucketErrorV2(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV2(
"s3.amazonaws.com",
os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
true,
mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@ -89,10 +88,10 @@ func TestGetObjectClosedTwiceV2(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV2(
"s3.amazonaws.com",
os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
true,
mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@ -113,13 +112,8 @@ func TestGetObjectClosedTwiceV2(t *testing.T) {
t.Fatal("Error:", err, bucketName)
}
// Generate data more than 32K
buf := make([]byte, rand.Intn(1<<20)+32*1024)
_, err = io.ReadFull(crand.Reader, buf)
if err != nil {
t.Fatal("Error:", err)
}
// Generate data more than 32K.
buf := bytes.Repeat([]byte("h"), rand.Intn(1<<20)+32*1024)
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
@ -174,10 +168,10 @@ func TestRemovePartiallyUploadedV2(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV2(
"s3.amazonaws.com",
os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
true,
mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@ -198,15 +192,18 @@ func TestRemovePartiallyUploadedV2(t *testing.T) {
t.Fatal("Error:", err, bucketName)
}
r := bytes.NewReader(bytes.Repeat([]byte("a"), 128*1024))
reader, writer := io.Pipe()
go func() {
i := 0
for i < 25 {
_, err = io.CopyN(writer, crand.Reader, 128*1024)
_, err = io.CopyN(writer, r, 128*1024)
if err != nil {
t.Fatal("Error:", err, bucketName)
}
i++
r.Seek(0, 0)
}
writer.CloseWithError(errors.New("Proactively closed to be verified later."))
}()
@ -241,10 +238,10 @@ func TestResumablePutObjectV2(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV2(
"s3.amazonaws.com",
os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
true,
mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@ -271,8 +268,9 @@ func TestResumablePutObjectV2(t *testing.T) {
t.Fatal("Error:", err)
}
r := bytes.NewReader(bytes.Repeat([]byte("b"), 11*1024*1024))
// Copy 11MiB worth of data.
n, err := io.CopyN(file, crand.Reader, 11*1024*1024)
n, err := io.CopyN(file, r, 11*1024*1024)
if err != nil {
t.Fatal("Error:", err)
}
@ -352,10 +350,10 @@ func TestFPutObjectV2(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV2(
"s3.amazonaws.com",
os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
true,
mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@ -382,7 +380,8 @@ func TestFPutObjectV2(t *testing.T) {
t.Fatal("Error:", err)
}
n, err := io.CopyN(file, crand.Reader, 11*1024*1024)
r := bytes.NewReader(bytes.Repeat([]byte("b"), 11*1024*1024))
n, err := io.CopyN(file, r, 11*1024*1024)
if err != nil {
t.Fatal("Error:", err)
}
@ -500,10 +499,10 @@ func TestResumableFPutObjectV2(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV2(
"s3.amazonaws.com",
os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
true,
mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@ -529,7 +528,8 @@ func TestResumableFPutObjectV2(t *testing.T) {
t.Fatal("Error:", err)
}
n, err := io.CopyN(file, crand.Reader, 11*1024*1024)
r := bytes.NewReader(bytes.Repeat([]byte("b"), 11*1024*1024))
n, err := io.CopyN(file, r, 11*1024*1024)
if err != nil {
t.Fatal("Error:", err)
}
@ -577,10 +577,10 @@ func TestMakeBucketRegionsV2(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV2(
"s3.amazonaws.com",
os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
true,
mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@ -628,10 +628,10 @@ func TestGetObjectReadSeekFunctionalV2(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV2(
"s3.amazonaws.com",
os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
true,
mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@ -652,15 +652,10 @@ func TestGetObjectReadSeekFunctionalV2(t *testing.T) {
t.Fatal("Error:", err, bucketName)
}
// Generate data more than 32K
buf := make([]byte, rand.Intn(1<<20)+32*1024)
// Generate data more than 32K.
buf := bytes.Repeat([]byte("2"), rand.Intn(1<<20)+32*1024)
_, err = io.ReadFull(crand.Reader, buf)
if err != nil {
t.Fatal("Error:", err)
}
// Save the data
// Save the data.
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "binary/octet-stream")
if err != nil {
@ -716,7 +711,7 @@ func TestGetObjectReadSeekFunctionalV2(t *testing.T) {
}
var buffer1 bytes.Buffer
if n, err = io.CopyN(&buffer1, r, st.Size); err != nil {
if _, err = io.CopyN(&buffer1, r, st.Size); err != nil {
if err != io.EOF {
t.Fatal("Error:", err)
}
@ -766,10 +761,10 @@ func TestGetObjectReadAtFunctionalV2(t *testing.T) {
// Instantiate new minio client object.
c, err := NewV2(
"s3.amazonaws.com",
os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
true,
mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@ -791,12 +786,7 @@ func TestGetObjectReadAtFunctionalV2(t *testing.T) {
}
// Generate data more than 32K
buf := make([]byte, rand.Intn(1<<20)+32*1024)
_, err = io.ReadFull(crand.Reader, buf)
if err != nil {
t.Fatal("Error:", err)
}
buf := bytes.Repeat([]byte("8"), rand.Intn(1<<20)+32*1024)
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
@ -907,10 +897,10 @@ func TestCopyObjectV2(t *testing.T) {
// Instantiate new minio client object
c, err := NewV2(
"s3.amazonaws.com",
os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
true,
mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@ -938,12 +928,7 @@ func TestCopyObjectV2(t *testing.T) {
}
// Generate data more than 32K
buf := make([]byte, rand.Intn(1<<20)+32*1024)
_, err = io.ReadFull(crand.Reader, buf)
if err != nil {
t.Fatal("Error:", err)
}
buf := bytes.Repeat([]byte("9"), rand.Intn(1<<20)+32*1024)
// Save the data
objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
@ -958,7 +943,7 @@ func TestCopyObjectV2(t *testing.T) {
}
// Set copy conditions.
copyConds := NewCopyConditions()
copyConds := CopyConditions{}
err = copyConds.SetModified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))
if err != nil {
t.Fatal("Error:", err)
@ -1029,10 +1014,10 @@ func TestFunctionalV2(t *testing.T) {
rand.Seed(time.Now().Unix())
c, err := NewV2(
"s3.amazonaws.com",
os.Getenv("S3_ADDRESS"),
os.Getenv("ACCESS_KEY"),
os.Getenv("SECRET_KEY"),
true,
mustParseBool(os.Getenv("S3_SECURE")),
)
if err != nil {
t.Fatal("Error:", err)
@ -1109,11 +1094,7 @@ func TestFunctionalV2(t *testing.T) {
objectName := bucketName + "unique"
// Generate data
buf := make([]byte, rand.Intn(1<<19))
_, err = io.ReadFull(crand.Reader, buf)
if err != nil {
t.Fatal("Error: ", err)
}
buf := bytes.Repeat([]byte("n"), rand.Intn(1<<19))
n, err := c.PutObject(bucketName, objectName, bytes.NewReader(buf), "")
if err != nil {
@ -1243,11 +1224,9 @@ func TestFunctionalV2(t *testing.T) {
if err != nil {
t.Fatal("Error: ", err)
}
buf = make([]byte, rand.Intn(1<<20))
_, err = io.ReadFull(crand.Reader, buf)
if err != nil {
t.Fatal("Error: ", err)
}
// Generate data more than 32K
buf = bytes.Repeat([]byte("1"), rand.Intn(1<<20)+32*1024)
req, err := http.NewRequest("PUT", presignedPutURL.String(), bytes.NewReader(buf))
if err != nil {
t.Fatal("Error: ", err)

File diff suppressed because it is too large

View File

@ -18,11 +18,9 @@ package minio
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"strings"
"testing"
@ -202,49 +200,6 @@ func TestTempFile(t *testing.T) {
}
}
// Tests url encoding.
func TestEncodeURL2Path(t *testing.T) {
type urlStrings struct {
objName string
encodedObjName string
}
bucketName := "bucketName"
want := []urlStrings{
{
objName: "本語",
encodedObjName: "%E6%9C%AC%E8%AA%9E",
},
{
objName: "本語.1",
encodedObjName: "%E6%9C%AC%E8%AA%9E.1",
},
{
objName: ">123>3123123",
encodedObjName: "%3E123%3E3123123",
},
{
objName: "test 1 2.txt",
encodedObjName: "test%201%202.txt",
},
{
objName: "test++ 1.txt",
encodedObjName: "test%2B%2B%201.txt",
},
}
for _, o := range want {
u, err := url.Parse(fmt.Sprintf("https://%s.s3.amazonaws.com/%s", bucketName, o.objName))
if err != nil {
t.Fatal("Error:", err)
}
urlPath := "/" + bucketName + "/" + o.encodedObjName
if urlPath != encodeURL2Path(u) {
t.Fatal("Error")
}
}
}
// Tests error response structure.
func TestErrorResponse(t *testing.T) {
var err error
@ -270,53 +225,6 @@ func TestErrorResponse(t *testing.T) {
}
}
// Tests signature calculation.
func TestSignatureCalculation(t *testing.T) {
req, err := http.NewRequest("GET", "https://s3.amazonaws.com", nil)
if err != nil {
t.Fatal("Error:", err)
}
req = signV4(*req, "", "", "us-east-1")
if req.Header.Get("Authorization") != "" {
t.Fatal("Error: anonymous credentials should not have Authorization header.")
}
req = preSignV4(*req, "", "", "us-east-1", 0)
if strings.Contains(req.URL.RawQuery, "X-Amz-Signature") {
t.Fatal("Error: anonymous credentials should not have Signature query resource.")
}
req = signV2(*req, "", "")
if req.Header.Get("Authorization") != "" {
t.Fatal("Error: anonymous credentials should not have Authorization header.")
}
req = preSignV2(*req, "", "", 0)
if strings.Contains(req.URL.RawQuery, "Signature") {
t.Fatal("Error: anonymous credentials should not have Signature query resource.")
}
req = signV4(*req, "ACCESS-KEY", "SECRET-KEY", "us-east-1")
if req.Header.Get("Authorization") == "" {
t.Fatal("Error: normal credentials should have Authorization header.")
}
req = preSignV4(*req, "ACCESS-KEY", "SECRET-KEY", "us-east-1", 0)
if !strings.Contains(req.URL.RawQuery, "X-Amz-Signature") {
t.Fatal("Error: normal credentials should have Signature query resource.")
}
req = signV2(*req, "ACCESS-KEY", "SECRET-KEY")
if req.Header.Get("Authorization") == "" {
t.Fatal("Error: normal credentials should have Authorization header.")
}
req = preSignV2(*req, "ACCESS-KEY", "SECRET-KEY", 0)
if !strings.Contains(req.URL.RawQuery, "Signature") {
t.Fatal("Error: normal credentials should not have Signature query resource.")
}
}
// Tests signature type.
func TestSignatureType(t *testing.T) {
clnt := Client{}
@ -354,11 +262,11 @@ func TestBucketPolicyTypes(t *testing.T) {
// Tests optimal part size.
func TestPartSize(t *testing.T) {
totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(5000000000000000000)
_, _, _, err := optimalPartInfo(5000000000000000000)
if err == nil {
t.Fatal("Error: should fail")
}
totalPartsCount, partSize, lastPartSize, err = optimalPartInfo(5497558138880)
totalPartsCount, partSize, lastPartSize, err := optimalPartInfo(5497558138880)
if err != nil {
t.Fatal("Error: ", err)
}
@ -371,7 +279,7 @@ func TestPartSize(t *testing.T) {
if lastPartSize != 134217728 {
t.Fatalf("Error: expecting last part size of 241172480: got %v instead", lastPartSize)
}
totalPartsCount, partSize, lastPartSize, err = optimalPartInfo(5000000000)
_, partSize, _, err = optimalPartInfo(5000000000)
if err != nil {
t.Fatal("Error:", err)
}
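The expected values here follow from the new 64MiB minPartSize: pick the smallest multiple of minPartSize that covers the object in at most 10000 parts. A sketch of that arithmetic — an assumption about optimalPartInfo's strategy, not its exact code:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	const (
		maxParts    = 10000
		minPartSize = 1024 * 1024 * 64 // 64MiB, as in constants.go
	)
	objectSize := int64(5497558138880) // 5TiB, the value used in TestPartSize

	// Smallest multiple of minPartSize so that maxParts parts cover the object.
	partSize := int64(math.Ceil(math.Ceil(float64(objectSize)/maxParts)/minPartSize)) * minPartSize
	totalParts := int64(math.Ceil(float64(objectSize) / float64(partSize)))
	lastPartSize := objectSize - (totalParts-1)*partSize

	fmt.Println(totalParts, partSize, lastPartSize) // 9103 603979776 134217728
}
```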

View File

@ -23,6 +23,9 @@ import (
"path"
"strings"
"sync"
"github.com/minio/minio-go/pkg/s3signer"
"github.com/minio/minio-go/pkg/s3utils"
)
// bucketLocationCache - Provides simple mechanism to hold bucket
@ -85,7 +88,7 @@ func (c Client) getBucketLocation(bucketName string) (string, error) {
return location, nil
}
if isAmazonChinaEndpoint(c.endpointURL) {
if s3utils.IsAmazonChinaEndpoint(c.endpointURL) {
// For china specifically we need to set everything to
// cn-north-1 for now, there is no easier way until AWS S3
// provides a cleaner compatible API across "us-east-1" and
@ -160,10 +163,7 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro
urlValues.Set("location", "")
// Set get bucket location always as path style.
targetURL, err := url.Parse(c.endpointURL)
if err != nil {
return nil, err
}
targetURL := c.endpointURL
targetURL.Path = path.Join(bucketName, "") + "/"
targetURL.RawQuery = urlValues.Encode()
@ -189,9 +189,9 @@ func (c Client) getBucketLocationRequest(bucketName string) (*http.Request, erro
// Sign the request.
if c.signature.isV4() {
req = signV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
} else if c.signature.isV2() {
req = signV2(*req, c.accessKeyID, c.secretAccessKey)
req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey)
}
return req, nil
}

View File

@ -26,6 +26,8 @@ import (
"path"
"reflect"
"testing"
"github.com/minio/minio-go/pkg/s3signer"
)
// Test validates `newBucketLocationCache`.
@ -70,14 +72,12 @@ func TestGetBucketLocationRequest(t *testing.T) {
urlValues.Set("location", "")
// Set get bucket location always as path style.
targetURL, err := url.Parse(c.endpointURL)
if err != nil {
return nil, err
}
targetURL := c.endpointURL
targetURL.Path = path.Join(bucketName, "") + "/"
targetURL.RawQuery = urlValues.Encode()
// Get a new HTTP request for the method.
var err error
req, err = http.NewRequest("GET", targetURL.String(), nil)
if err != nil {
return nil, err
@ -93,9 +93,9 @@ func TestGetBucketLocationRequest(t *testing.T) {
// Sign the request.
if c.signature.isV4() {
req = signV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
req = s3signer.SignV4(*req, c.accessKeyID, c.secretAccessKey, "us-east-1")
} else if c.signature.isV2() {
req = signV2(*req, c.accessKeyID, c.secretAccessKey)
req = s3signer.SignV2(*req, c.accessKeyID, c.secretAccessKey)
}
return req, nil

View File

@ -84,7 +84,7 @@ func (arn Arn) String() string {
// NotificationConfig - represents one single notification configuration
// such as topic, queue or lambda configuration.
type NotificationConfig struct {
Id string `xml:"Id,omitempty"`
ID string `xml:"Id,omitempty"`
Arn Arn `xml:"-"`
Events []NotificationEventType `xml:"Event"`
Filter *Filter `xml:"Filter,omitempty"`

View File

@ -18,7 +18,7 @@ package minio
/// Multipart upload defaults.
// miniPartSize - minimum part size 5MiB per object after which
// minPartSize - minimum part size 64MiB per object after which
// putObject behaves internally as multipart.
const minPartSize = 1024 * 1024 * 64
@ -44,3 +44,9 @@ const optimalReadBufferSize = 1024 * 1024 * 5
// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when
// we don't want to sign the request payload
const unsignedPayload = "UNSIGNED-PAYLOAD"
// Signature related constants.
const (
signV4Algorithm = "AWS4-HMAC-SHA256"
iso8601DateFormat = "20060102T150405Z"
)

View File

@ -41,11 +41,13 @@ type CopyConditions struct {
conditions []copyCondition
}
// NewCopyConditions - Instantiate new list of conditions.
// NewCopyConditions - Instantiate new list of conditions. This
// function is left behind for backward compatibility. The idiomatic
// way to set an empty set of copy conditions is,
// ``copyConditions := CopyConditions{}``.
//
func NewCopyConditions() CopyConditions {
return CopyConditions{
conditions: make([]copyCondition, 0),
}
return CopyConditions{}
}
// SetMatchETag - set match etag.

File diff suppressed because it is too large

View File

@ -33,7 +33,7 @@ func main() {
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
minioClient, err := minio.New("play.minio.io:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
minioClient, err := minio.New("play.minio.io:9000", "YOUR-ACCESS", "YOUR-SECRET", true)
if err != nil {
log.Fatalln(err)
}
@ -46,30 +46,11 @@ func main() {
// Indicate to our routine to exit cleanly upon return.
defer close(doneCh)
// Fetch the bucket location.
location, err := minioClient.GetBucketLocation("YOUR-BUCKET")
if err != nil {
log.Fatalln(err)
}
// Construct a new account Arn.
accountArn := minio.NewArn("minio", "sns", location, "your-account-id", "listen")
topicConfig := minio.NewNotificationConfig(accountArn)
topicConfig.AddEvents(minio.ObjectCreatedAll, minio.ObjectRemovedAll)
topicConfig.AddFilterPrefix("photos/")
topicConfig.AddFilterSuffix(".jpg")
// Now, set all previously created notification configs
bucketNotification := minio.BucketNotification{}
bucketNotification.AddTopic(topicConfig)
err = minioClient.SetBucketNotification("YOUR-BUCKET", bucketNotification)
if err != nil {
log.Fatalln("Error: " + err.Error())
}
log.Println("Success")
// Listen for bucket notifications on "mybucket" filtered by accountArn "arn:minio:sns:<location>:<your-account-id>:listen".
for notificationInfo := range minioClient.ListenBucketNotification("YOUR-BUCKET", accountArn, doneCh) {
// Listen for bucket notifications on "mybucket" filtered by prefix, suffix and events.
for notificationInfo := range minioClient.ListenBucketNotification("YOUR-BUCKET", "PREFIX", "SUFFIX", []string{
"s3:ObjectCreated:*",
"s3:ObjectRemoved:*",
}, doneCh) {
if notificationInfo.Err != nil {
log.Fatalln(notificationInfo.Err)
}

View File

@ -38,10 +38,14 @@ func main() {
log.Fatalln(err)
}
err = s3Client.BucketExists("my-bucketname")
found, err := s3Client.BucketExists("my-bucketname")
if err != nil {
log.Fatalln(err)
}
log.Println("Success")
if found {
log.Println("Bucket found.")
} else {
log.Println("Bucket not found.")
}
}

View File

@ -45,7 +45,7 @@ func main() {
// All following conditions are allowed and can be combined together.
// Set copy conditions.
var copyConds = minio.NewCopyConditions()
var copyConds = minio.CopyConditions{}
// Set modified condition, copy object modified since 2014 April.
copyConds.SetModified(time.Date(2014, time.April, 0, 0, 0, 0, 0, time.UTC))

View File

@ -0,0 +1,56 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"github.com/minio/minio-go"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are
// dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
// s3Client.TraceOn(os.Stderr)
// Fetch the policy at 'my-objectprefix'.
policies, err := s3Client.ListBucketPolicies("my-bucketname", "my-objectprefix")
if err != nil {
log.Fatalln(err)
}
// ListBucketPolicies returns a map of objects policy rules and their associated permissions
// e.g. mybucket/downloadfolder/* => readonly
// mybucket/shared/* => readwrite
for resource, permission := range policies {
log.Println(resource, " => ", permission)
}
}

View File

@ -0,0 +1,56 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"os"
"github.com/minio/minio-go"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-testfile, my-bucketname and
// my-objectname are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
// Enable S3 transfer accelerate endpoint.
s3Client.S3TransferAccelerate("s3-accelerate.amazonaws.com")
object, err := os.Open("my-testfile")
if err != nil {
log.Fatalln(err)
}
defer object.Close()
n, err := s3Client.PutObject("my-bucketname", "my-objectname", object, "application/octet-stream")
if err != nil {
log.Fatalln(err)
}
log.Println("Uploaded", "my-objectname", " of size: ", n, "Successfully.")
}

View File

@ -0,0 +1,61 @@
// +build ignore
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"log"
"strconv"
"github.com/minio/minio-go"
)
func main() {
// Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY, my-bucketname and my-objectname
// are dummy values, please replace them with original values.
// Requests are always secure (HTTPS) by default. Set secure=false to enable insecure (HTTP) access.
// This boolean value is the last argument for New().
// New returns an Amazon S3 compatible client object. API compatibility (v2 or v4) is automatically
// determined based on the Endpoint value.
s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
if err != nil {
log.Fatalln(err)
}
objectsCh := make(chan string)
// Send the object names to be removed to objectsCh
go func() {
defer close(objectsCh)
for i := 0; i < 10; i++ {
objectsCh <- "/path/to/my-objectname" + strconv.Itoa(i)
}
}()
// Call RemoveObjects API
errorCh := s3Client.RemoveObjects("my-bucketname", objectsCh)
// Print errors received from RemoveObjects API
for e := range errorCh {
log.Fatalln("Failed to remove " + e.ObjectName + ", error: " + e.Err.Error())
}
log.Println("Success")
}

View File

@ -22,6 +22,7 @@ import (
"log"
"github.com/minio/minio-go"
"github.com/minio/minio-go/pkg/policy"
)
func main() {
@ -41,11 +42,11 @@ func main() {
// s3Client.TraceOn(os.Stderr)
// Description of policy input.
// minio.BucketPolicyNone - Remove any previously applied bucket policy at a prefix.
// minio.BucketPolicyReadOnly - Set read-only operations at a prefix.
// minio.BucketPolicyWriteOnly - Set write-only operations at a prefix.
// minio.BucketPolicyReadWrite - Set read-write operations at a prefix.
err = s3Client.SetBucketPolicy("my-bucketname", "my-objectprefix", minio.BucketPolicyReadWrite)
// policy.BucketPolicyNone - Remove any previously applied bucket policy at a prefix.
// policy.BucketPolicyReadOnly - Set read-only operations at a prefix.
// policy.BucketPolicyWriteOnly - Set write-only operations at a prefix.
// policy.BucketPolicyReadWrite - Set read-write operations at a prefix.
err = s3Client.SetBucketPolicy("my-bucketname", "my-objectprefix", policy.BucketPolicyReadWrite)
if err != nil {
log.Fatalln(err)
}
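Since the policy constants moved into pkg/policy, reading a policy back goes through the same package; a hedged sketch (GetBucketPolicy's exact return type is an assumption based on the API reference):

```go
package main

import (
	"log"

	"github.com/minio/minio-go"
)

func main() {
	s3Client, err := minio.New("s3.amazonaws.com", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Read back the effective policy at the prefix; the result is a
	// policy.BucketPolicy value such as policy.BucketPolicyReadWrite.
	bucketPolicy, err := s3Client.GetBucketPolicy("my-bucketname", "my-objectprefix")
	if err != nil {
		log.Fatalln(err)
	}
	log.Println("Policy at prefix:", bucketPolicy)
}
```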

View File

@ -34,7 +34,7 @@ const (
BucketPolicyWriteOnly = "writeonly"
)
// isValidBucketPolicy - Is provided policy value supported.
// IsValidBucketPolicy - returns true if policy is valid and supported, false otherwise.
func (p BucketPolicy) IsValidBucketPolicy() bool {
switch p {
case BucketPolicyNone, BucketPolicyReadOnly, BucketPolicyReadWrite, BucketPolicyWriteOnly:
@ -508,7 +508,7 @@ func getObjectPolicy(statement Statement) (readOnly bool, writeOnly bool) {
return readOnly, writeOnly
}
// Returns policy of given bucket name, prefix in given statements.
// GetPolicy - Returns policy of given bucket name, prefix in given statements.
func GetPolicy(statements []Statement, bucketName string, prefix string) BucketPolicy {
bucketResource := awsResourcePrefix + bucketName
objectResource := awsResourcePrefix + bucketName + "/" + prefix + "*"
@ -563,8 +563,34 @@ func GetPolicy(statements []Statement, bucketName string, prefix string) BucketP
return policy
}
// Returns new statements containing policy of given bucket name and
// prefix are appended.
// GetPolicies - returns a map of policy rules for the given bucket name from the given statements.
func GetPolicies(statements []Statement, bucketName string) map[string]BucketPolicy {
policyRules := map[string]BucketPolicy{}
objResources := set.NewStringSet()
// Search all resources related to objects policy
for _, s := range statements {
for r := range s.Resources {
if strings.HasPrefix(r, awsResourcePrefix+bucketName+"/") {
objResources.Add(r)
}
}
}
// Treat each object resource as an actual object and fetch its policy
for r := range objResources {
// Strip the trailing '*' if present, remembering it for the map key below
asterisk := ""
if strings.HasSuffix(r, "*") {
r = r[:len(r)-1]
asterisk = "*"
}
objectPath := r[len(awsResourcePrefix+bucketName)+1 : len(r)]
p := GetPolicy(statements, bucketName, objectPath)
policyRules[bucketName+"/"+objectPath+asterisk] = p
}
return policyRules
}
// SetPolicy - returns new statements with the policy of the given bucket name and prefix appended.
func SetPolicy(statements []Statement, policy BucketPolicy, bucketName string, prefix string) []Statement {
out := removeStatements(statements, bucketName, prefix)
// fmt.Println("out = ")

View File

@ -19,6 +19,7 @@ package policy
import (
"encoding/json"
"fmt"
"reflect"
"testing"
"github.com/minio/minio-go/pkg/set"
@ -1376,6 +1377,104 @@ func TestGetObjectPolicy(t *testing.T) {
}
}
// GetPolicies is called and the result is validated
func TestListBucketPolicies(t *testing.T) {
// Condition for read objects
downloadCondMap := make(ConditionMap)
downloadCondKeyMap := make(ConditionKeyMap)
downloadCondKeyMap.Add("s3:prefix", set.CreateStringSet("download"))
downloadCondMap.Add("StringEquals", downloadCondKeyMap)
// Condition for readwrite objects
downloadUploadCondMap := make(ConditionMap)
downloadUploadCondKeyMap := make(ConditionKeyMap)
downloadUploadCondKeyMap.Add("s3:prefix", set.CreateStringSet("both"))
downloadUploadCondMap.Add("StringEquals", downloadUploadCondKeyMap)
testCases := []struct {
statements []Statement
bucketName string
prefix string
expectedResult map[string]BucketPolicy
}{
// Empty statements, bucket name and prefix.
{[]Statement{}, "", "", map[string]BucketPolicy{}},
// Non-empty statements, empty bucket name and empty prefix.
{[]Statement{{
Actions: readOnlyBucketActions,
Effect: "Allow",
Principal: User{AWS: set.CreateStringSet("*")},
Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
}}, "", "", map[string]BucketPolicy{}},
// Empty statements, non-empty bucket name and empty prefix.
{[]Statement{}, "mybucket", "", map[string]BucketPolicy{}},
// Readonly object statement
{[]Statement{
{
Actions: commonBucketActions,
Effect: "Allow",
Principal: User{AWS: set.CreateStringSet("*")},
Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
},
{
Actions: readOnlyBucketActions,
Effect: "Allow",
Principal: User{AWS: set.CreateStringSet("*")},
Conditions: downloadCondMap,
Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
},
{
Actions: readOnlyObjectActions,
Effect: "Allow",
Principal: User{AWS: set.CreateStringSet("*")},
Resources: set.CreateStringSet("arn:aws:s3:::mybucket/download*"),
}}, "mybucket", "", map[string]BucketPolicy{"mybucket/download*": BucketPolicyReadOnly}},
// Write Only
{[]Statement{
{
Actions: commonBucketActions.Union(writeOnlyBucketActions),
Effect: "Allow",
Principal: User{AWS: set.CreateStringSet("*")},
Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
},
{
Actions: writeOnlyObjectActions,
Effect: "Allow",
Principal: User{AWS: set.CreateStringSet("*")},
Resources: set.CreateStringSet("arn:aws:s3:::mybucket/upload*"),
}}, "mybucket", "", map[string]BucketPolicy{"mybucket/upload*": BucketPolicyWriteOnly}},
// Readwrite
{[]Statement{
{
Actions: commonBucketActions.Union(writeOnlyBucketActions),
Effect: "Allow",
Principal: User{AWS: set.CreateStringSet("*")},
Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
},
{
Actions: readOnlyBucketActions,
Effect: "Allow",
Principal: User{AWS: set.CreateStringSet("*")},
Conditions: downloadUploadCondMap,
Resources: set.CreateStringSet("arn:aws:s3:::mybucket"),
},
{
Actions: writeOnlyObjectActions.Union(readOnlyObjectActions),
Effect: "Allow",
Principal: User{AWS: set.CreateStringSet("*")},
Resources: set.CreateStringSet("arn:aws:s3:::mybucket/both*"),
}}, "mybucket", "", map[string]BucketPolicy{"mybucket/both*": BucketPolicyReadWrite}},
}
for _, testCase := range testCases {
policyRules := GetPolicies(testCase.statements, testCase.bucketName)
if !reflect.DeepEqual(testCase.expectedResult, policyRules) {
t.Fatalf("%+v:\n expected: %+v, got: %+v", testCase, testCase.expectedResult, policyRules)
}
}
}
// GetPolicy() is called and the result is validated.
func TestGetPolicy(t *testing.T) {
helloCondMap := make(ConditionMap)

View File

@ -14,7 +14,7 @@
* limitations under the License.
*/
package minio
package s3signer
import (
"bytes"
@ -29,6 +29,8 @@ import (
"strconv"
"strings"
"time"
"github.com/minio/minio-go/pkg/s3utils"
)
// Signature and API related constants.
@ -45,22 +47,22 @@ func encodeURL2Path(u *url.URL) (path string) {
bucketName := hostSplits[0]
path = "/" + bucketName
path += u.Path
path = urlEncodePath(path)
path = s3utils.EncodePath(path)
return
}
if strings.HasSuffix(u.Host, ".storage.googleapis.com") {
path = "/" + strings.TrimSuffix(u.Host, ".storage.googleapis.com")
path += u.Path
path = urlEncodePath(path)
path = s3utils.EncodePath(path)
return
}
path = urlEncodePath(u.Path)
path = s3utils.EncodePath(u.Path)
return
}
// preSignV2 - presign the request in following style.
// PreSignV2 - presign the request in following style.
// https://${S3_BUCKET}.s3.amazonaws.com/${S3_OBJECT}?AWSAccessKeyId=${S3_ACCESS_KEY}&Expires=${TIMESTAMP}&Signature=${SIGNATURE}.
func preSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64) *http.Request {
func PreSignV2(req http.Request, accessKeyID, secretAccessKey string, expires int64) *http.Request {
// Presign is not needed for anonymous credentials.
if accessKeyID == "" || secretAccessKey == "" {
return &req
@ -95,18 +97,18 @@ func preSignV2(req http.Request, accessKeyID, secretAccessKey string, expires in
query.Set("Expires", strconv.FormatInt(epochExpires, 10))
// Encode query and save.
req.URL.RawQuery = queryEncode(query)
req.URL.RawQuery = s3utils.QueryEncode(query)
// Save signature finally.
req.URL.RawQuery += "&Signature=" + urlEncodePath(signature)
req.URL.RawQuery += "&Signature=" + s3utils.EncodePath(signature)
// Return.
return &req
}
// postPresignSignatureV2 - presigned signature for PostPolicy
// PostPresignSignatureV2 - presigned signature for PostPolicy
// request.
func postPresignSignatureV2(policyBase64, secretAccessKey string) string {
func PostPresignSignatureV2(policyBase64, secretAccessKey string) string {
hm := hmac.New(sha1.New, []byte(secretAccessKey))
hm.Write([]byte(policyBase64))
signature := base64.StdEncoding.EncodeToString(hm.Sum(nil))
@ -129,8 +131,8 @@ func postPresignSignatureV2(policyBase64, secretAccessKey string) string {
//
// CanonicalizedProtocolHeaders = <described below>
// signV2 sign the request before Do() (AWS Signature Version 2).
func signV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request {
// SignV2 sign the request before Do() (AWS Signature Version 2).
func SignV2(req http.Request, accessKeyID, secretAccessKey string) *http.Request {
// Signature calculation is not needed for anonymous credentials.
if accessKeyID == "" || secretAccessKey == "" {
return &req
@ -257,6 +259,7 @@ func writeCanonicalizedHeaders(buf *bytes.Buffer, req http.Request) {
// have signature-related issues
var resourceList = []string{
"acl",
"delete",
"location",
"logging",
"notification",
@ -286,7 +289,7 @@ func writeCanonicalizedResource(buf *bytes.Buffer, req http.Request, isPreSign b
// Get encoded URL path.
if len(requestURL.Query()) > 0 {
// Keep the usual queries unescaped for string to sign.
query, _ := url.QueryUnescape(queryEncode(requestURL.Query()))
query, _ := url.QueryUnescape(s3utils.QueryEncode(requestURL.Query()))
path = path + "?" + query
}
buf.WriteString(path)
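With the V2 signer exported from pkg/s3signer, callers outside the package can sign a request directly; a minimal sketch using only the SignV2 signature shown in this hunk:

```go
package main

import (
	"log"
	"net/http"

	"github.com/minio/minio-go/pkg/s3signer"
)

func main() {
	req, err := http.NewRequest("GET", "https://my-bucketname.s3.amazonaws.com/my-objectname", nil)
	if err != nil {
		log.Fatalln(err)
	}

	// SignV2 takes the request by value and returns a signed copy with the
	// Authorization header populated (AWS Signature Version 2).
	signed := s3signer.SignV2(*req, "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY")
	log.Println(signed.Header.Get("Authorization"))
}
```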

View File

@ -14,7 +14,7 @@
* limitations under the License.
*/
package minio
package s3signer
import (
"sort"

View File

@ -14,7 +14,7 @@
* limitations under the License.
*/
package minio
package s3signer
import (
"bytes"
@ -24,6 +24,8 @@ import (
"strconv"
"strings"
"time"
"github.com/minio/minio-go/pkg/s3utils"
)
// Signature and API related constants.
@ -101,8 +103,8 @@ func getScope(location string, t time.Time) string {
return scope
}
// getCredential generate a credential string.
func getCredential(accessKeyID, location string, t time.Time) string {
// GetCredential generate a credential string.
func GetCredential(accessKeyID, location string, t time.Time) string {
scope := getScope(location, t)
return accessKeyID + "/" + scope
}
@ -185,7 +187,7 @@ func getCanonicalRequest(req http.Request) string {
req.URL.RawQuery = strings.Replace(req.URL.Query().Encode(), "+", "%20", -1)
canonicalRequest := strings.Join([]string{
req.Method,
urlEncodePath(req.URL.Path),
s3utils.EncodePath(req.URL.Path),
req.URL.RawQuery,
getCanonicalHeaders(req),
getSignedHeaders(req),
@ -202,9 +204,9 @@ func getStringToSignV4(t time.Time, location, canonicalRequest string) string {
return stringToSign
}
// preSignV4 presign the request, in accordance with
// PreSignV4 presign the request, in accordance with
// http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html.
func preSignV4(req http.Request, accessKeyID, secretAccessKey, location string, expires int64) *http.Request {
func PreSignV4(req http.Request, accessKeyID, secretAccessKey, location string, expires int64) *http.Request {
// Presign is not needed for anonymous credentials.
if accessKeyID == "" || secretAccessKey == "" {
return &req
@ -214,7 +216,7 @@ func preSignV4(req http.Request, accessKeyID, secretAccessKey, location string,
t := time.Now().UTC()
// Get credential string.
credential := getCredential(accessKeyID, location, t)
credential := GetCredential(accessKeyID, location, t)
// Get all signed headers.
signedHeaders := getSignedHeaders(req)
@ -246,9 +248,9 @@ func preSignV4(req http.Request, accessKeyID, secretAccessKey, location string,
return &req
}
// postPresignSignatureV4 - presigned signature for PostPolicy
// PostPresignSignatureV4 - presigned signature for PostPolicy
// requests.
func postPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string {
func PostPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, location string) string {
// Get signining key.
signingkey := getSigningKey(secretAccessKey, location, t)
// Calculate signature.
@ -256,9 +258,9 @@ func postPresignSignatureV4(policyBase64 string, t time.Time, secretAccessKey, l
return signature
}
// signV4 sign the request before Do(), in accordance with
// SignV4 sign the request before Do(), in accordance with
// http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html.
func signV4(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request {
func SignV4(req http.Request, accessKeyID, secretAccessKey, location string) *http.Request {
// Signature calculation is not needed for anonymous credentials.
if accessKeyID == "" || secretAccessKey == "" {
return &req
@ -280,7 +282,7 @@ func signV4(req http.Request, accessKeyID, secretAccessKey, location string) *ht
signingKey := getSigningKey(secretAccessKey, location, t)
// Get credential string.
credential := getCredential(accessKeyID, location, t)
credential := GetCredential(accessKeyID, location, t)
// Get all signed headers.
signedHeaders := getSignedHeaders(req)
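The V4 counterparts are exported the same way; a minimal presign sketch based on the PreSignV4 signature above (assuming expires is in seconds, as in the V2 path):

```go
package main

import (
	"log"
	"net/http"

	"github.com/minio/minio-go/pkg/s3signer"
)

func main() {
	req, err := http.NewRequest("GET", "https://s3.amazonaws.com/my-bucketname/my-objectname", nil)
	if err != nil {
		log.Fatalln(err)
	}

	// Presign the request for 7 days using AWS Signature Version 4
	// query-string authentication.
	presigned := s3signer.PreSignV4(*req, "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", "us-east-1", 7*24*3600)
	log.Println(presigned.URL.String())
}
```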

View File

@ -0,0 +1,70 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package s3signer
import (
"net/http"
"strings"
"testing"
)
// Tests signature calculation.
func TestSignatureCalculation(t *testing.T) {
req, err := http.NewRequest("GET", "https://s3.amazonaws.com", nil)
if err != nil {
t.Fatal("Error:", err)
}
req = SignV4(*req, "", "", "us-east-1")
if req.Header.Get("Authorization") != "" {
t.Fatal("Error: anonymous credentials should not have Authorization header.")
}
req = PreSignV4(*req, "", "", "us-east-1", 0)
if strings.Contains(req.URL.RawQuery, "X-Amz-Signature") {
t.Fatal("Error: anonymous credentials should not have Signature query resource.")
}
req = SignV2(*req, "", "")
if req.Header.Get("Authorization") != "" {
t.Fatal("Error: anonymous credentials should not have Authorization header.")
}
req = PreSignV2(*req, "", "", 0)
if strings.Contains(req.URL.RawQuery, "Signature") {
t.Fatal("Error: anonymous credentials should not have Signature query resource.")
}
req = SignV4(*req, "ACCESS-KEY", "SECRET-KEY", "us-east-1")
if req.Header.Get("Authorization") == "" {
t.Fatal("Error: normal credentials should have Authorization header.")
}
req = PreSignV4(*req, "ACCESS-KEY", "SECRET-KEY", "us-east-1", 0)
if !strings.Contains(req.URL.RawQuery, "X-Amz-Signature") {
t.Fatal("Error: normal credentials should have Signature query resource.")
}
req = SignV2(*req, "ACCESS-KEY", "SECRET-KEY")
if req.Header.Get("Authorization") == "" {
t.Fatal("Error: normal credentials should have Authorization header.")
}
req = PreSignV2(*req, "ACCESS-KEY", "SECRET-KEY", 0)
if !strings.Contains(req.URL.RawQuery, "Signature") {
t.Fatal("Error: normal credentials should not have Signature query resource.")
}
}

View File

@ -0,0 +1,39 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package s3signer
import (
"crypto/hmac"
"crypto/sha256"
)
// unsignedPayload - value to be set to X-Amz-Content-Sha256 header when the request payload is not signed.
const unsignedPayload = "UNSIGNED-PAYLOAD"
// sum256 calculates the SHA256 sum of an input byte array.
func sum256(data []byte) []byte {
hash := sha256.New()
hash.Write(data)
return hash.Sum(nil)
}
// sumHMAC calculates the HMAC-SHA256 of data using the given key.
func sumHMAC(key []byte, data []byte) []byte {
hash := hmac.New(sha256.New, key)
hash.Write(data)
return hash.Sum(nil)
}

View File

@ -0,0 +1,66 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package s3signer
import (
"fmt"
"net/url"
"testing"
)
// Tests url encoding.
func TestEncodeURL2Path(t *testing.T) {
type urlStrings struct {
objName string
encodedObjName string
}
bucketName := "bucketName"
want := []urlStrings{
{
objName: "本語",
encodedObjName: "%E6%9C%AC%E8%AA%9E",
},
{
objName: "本語.1",
encodedObjName: "%E6%9C%AC%E8%AA%9E.1",
},
{
objName: ">123>3123123",
encodedObjName: "%3E123%3E3123123",
},
{
objName: "test 1 2.txt",
encodedObjName: "test%201%202.txt",
},
{
objName: "test++ 1.txt",
encodedObjName: "test%2B%2B%201.txt",
},
}
for _, o := range want {
u, err := url.Parse(fmt.Sprintf("https://%s.s3.amazonaws.com/%s", bucketName, o.objName))
if err != nil {
t.Fatal("Error:", err)
}
urlPath := "/" + bucketName + "/" + o.encodedObjName
if urlPath != encodeURL2Path(u) {
t.Fatal("Error")
}
}
}

View File

@ -0,0 +1,183 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package s3utils
import (
"bytes"
"encoding/hex"
"net"
"net/url"
"regexp"
"sort"
"strings"
"unicode/utf8"
)
// Sentinel URL is the default url value which is invalid.
var sentinelURL = url.URL{}
// IsValidDomain validates if input string is a valid domain name.
func IsValidDomain(host string) bool {
// See RFC 1035, RFC 3696.
host = strings.TrimSpace(host)
if len(host) == 0 || len(host) > 255 {
return false
}
// host cannot start or end with "-"
if host[len(host)-1:] == "-" || host[:1] == "-" {
return false
}
// host cannot start or end with "_"
if host[len(host)-1:] == "_" || host[:1] == "_" {
return false
}
// host cannot start or end with a "."
if host[len(host)-1:] == "." || host[:1] == "." {
return false
}
// All non alphanumeric characters are invalid.
if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:><?/") {
return false
}
// No need to regexp-match, since the list of invalid characters is non-exhaustive.
// We accept the host as valid here and let the request fail later.
return true
}
// IsValidIP parses input string for ip address validity.
func IsValidIP(ip string) bool {
return net.ParseIP(ip) != nil
}
// IsVirtualHostSupported - verifies if bucketName can be part of
// virtual host. Currently only Amazon S3 and Google Cloud Storage
// would support this.
func IsVirtualHostSupported(endpointURL url.URL, bucketName string) bool {
if endpointURL == sentinelURL {
return false
}
// bucketName can be valid but '.' in the hostname will fail SSL
// certificate validation. So do not use host-style for such buckets.
if endpointURL.Scheme == "https" && strings.Contains(bucketName, ".") {
return false
}
// Return true for all other cases
return IsAmazonEndpoint(endpointURL) || IsGoogleEndpoint(endpointURL)
}
// IsAmazonEndpoint - Match if it is exactly Amazon S3 endpoint.
func IsAmazonEndpoint(endpointURL url.URL) bool {
if IsAmazonChinaEndpoint(endpointURL) {
return true
}
return endpointURL.Host == "s3.amazonaws.com"
}
// IsAmazonChinaEndpoint - Match if it is exactly Amazon S3 China endpoint.
// Customers who wish to use the new Beijing Region are required
// to sign up for a separate set of account credentials unique to
// the China (Beijing) Region. Customers with existing AWS credentials
// will not be able to access resources in the new Region, and vice versa.
// For more info https://aws.amazon.com/about-aws/whats-new/2013/12/18/announcing-the-aws-china-beijing-region/
func IsAmazonChinaEndpoint(endpointURL url.URL) bool {
if endpointURL == sentinelURL {
return false
}
return endpointURL.Host == "s3.cn-north-1.amazonaws.com.cn"
}
// IsGoogleEndpoint - Match if it is exactly Google cloud storage endpoint.
func IsGoogleEndpoint(endpointURL url.URL) bool {
if endpointURL == sentinelURL {
return false
}
return endpointURL.Host == "storage.googleapis.com"
}
// Expects ASCII encoded strings - from output of EncodePath
func percentEncodeSlash(s string) string {
return strings.Replace(s, "/", "%2F", -1)
}
// QueryEncode - encodes query values in their URL encoded form. In
// addition to the percent encoding performed by urlEncodePath() used
// here, it also percent encodes '/' (forward slash)
func QueryEncode(v url.Values) string {
if v == nil {
return ""
}
var buf bytes.Buffer
keys := make([]string, 0, len(v))
for k := range v {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
vs := v[k]
prefix := percentEncodeSlash(EncodePath(k)) + "="
for _, v := range vs {
if buf.Len() > 0 {
buf.WriteByte('&')
}
buf.WriteString(prefix)
buf.WriteString(percentEncodeSlash(EncodePath(v)))
}
}
return buf.String()
}
// if the object name matches this reserved pattern, no encoding is needed
var reservedObjectNames = regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
// EncodePath encodes a string from its UTF-8 byte representation to percent-encoded escape sequences.
//
// This is necessary since the regular url.Parse() and url.Encode() functions do not support UTF-8;
// non-English characters cannot be parsed due to the way url.Encode() is written.
//
// This function, on the other hand, is a direct replacement for the url.Encode() technique and supports
// pretty much every UTF-8 character.
func EncodePath(pathName string) string {
if reservedObjectNames.MatchString(pathName) {
return pathName
}
var encodedPathname string
for _, s := range pathName {
if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
encodedPathname = encodedPathname + string(s)
continue
}
switch s {
case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
encodedPathname = encodedPathname + string(s)
continue
default:
len := utf8.RuneLen(s)
if len < 0 {
// if utf8 cannot convert return the same string as is
return pathName
}
u := make([]byte, len)
utf8.EncodeRune(u, s)
for _, r := range u {
hex := hex.EncodeToString([]byte{r})
encodedPathname = encodedPathname + "%" + strings.ToUpper(hex)
}
}
}
return encodedPathname
}
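A short sketch of the two exported encoders, with outputs matching the test vectors further down:

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/minio/minio-go/pkg/s3utils"
)

func main() {
	// EncodePath percent-encodes everything outside the unreserved set,
	// including multi-byte UTF-8 runes; '/' is left as-is.
	fmt.Println(s3utils.EncodePath("photos/本語 1.jpg")) // photos/%E6%9C%AC%E8%AA%9E%201.jpg

	// QueryEncode additionally percent-encodes '/' in keys and values.
	v := url.Values{}
	v.Add("@prefix", "a/b/c/")
	fmt.Println(s3utils.QueryEncode(v)) // %40prefix=a%2Fb%2Fc%2F
}
```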

View File

@ -0,0 +1,284 @@
/*
* Minio Go Library for Amazon S3 Compatible Cloud Storage (C) 2015, 2016 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package s3utils
import (
"net/url"
"testing"
)
// Tests for 'isValidDomain(host string) bool'.
func TestIsValidDomain(t *testing.T) {
testCases := []struct {
// Input.
host string
// Expected result.
result bool
}{
{"s3.amazonaws.com", true},
{"s3.cn-north-1.amazonaws.com.cn", true},
{"s3.amazonaws.com_", false},
{"%$$$", false},
{"s3.amz.test.com", true},
{"s3.%%", false},
{"localhost", true},
{"-localhost", false},
{"", false},
{"\n \t", false},
{" ", false},
}
for i, testCase := range testCases {
result := IsValidDomain(testCase.host)
if testCase.result != result {
t.Errorf("Test %d: Expected isValidDomain test to be '%v', but found '%v' instead", i+1, testCase.result, result)
}
}
}
// Tests validate IP address validator.
func TestIsValidIP(t *testing.T) {
testCases := []struct {
// Input.
ip string
// Expected result.
result bool
}{
{"192.168.1.1", true},
{"192.168.1", false},
{"192.168.1.1.1", false},
{"-192.168.1.1", false},
{"260.192.1.1", false},
}
for i, testCase := range testCases {
result := IsValidIP(testCase.ip)
if testCase.result != result {
t.Errorf("Test %d: Expected isValidIP to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.ip, result)
}
}
}
// Tests validate virtual host validator.
func TestIsVirtualHostSupported(t *testing.T) {
testCases := []struct {
url string
bucket string
// Expected result.
result bool
}{
{"https://s3.amazonaws.com", "my-bucket", true},
{"https://s3.cn-north-1.amazonaws.com.cn", "my-bucket", true},
{"https://s3.amazonaws.com", "my-bucket.", false},
{"https://amazons3.amazonaws.com", "my-bucket.", false},
{"https://storage.googleapis.com/", "my-bucket", true},
{"https://mystorage.googleapis.com/", "my-bucket", false},
}
for i, testCase := range testCases {
u, err := url.Parse(testCase.url)
if err != nil {
t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err)
}
result := IsVirtualHostSupported(*u, testCase.bucket)
if testCase.result != result {
t.Errorf("Test %d: Expected isVirtualHostSupported to be '%v' for input url \"%s\" and bucket \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, testCase.bucket, result)
}
}
}
// Tests validate Amazon endpoint validator.
func TestIsAmazonEndpoint(t *testing.T) {
testCases := []struct {
url string
// Expected result.
result bool
}{
{"https://192.168.1.1", false},
{"192.168.1.1", false},
{"http://storage.googleapis.com", false},
{"https://storage.googleapis.com", false},
{"storage.googleapis.com", false},
{"s3.amazonaws.com", false},
{"https://amazons3.amazonaws.com", false},
{"-192.168.1.1", false},
{"260.192.1.1", false},
// valid inputs.
{"https://s3.amazonaws.com", true},
{"https://s3.cn-north-1.amazonaws.com.cn", true},
}
for i, testCase := range testCases {
u, err := url.Parse(testCase.url)
if err != nil {
t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err)
}
result := IsAmazonEndpoint(*u)
if testCase.result != result {
t.Errorf("Test %d: Expected isAmazonEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
}
}
}
// Tests validate Amazon S3 China endpoint validator.
func TestIsAmazonChinaEndpoint(t *testing.T) {
testCases := []struct {
url string
// Expected result.
result bool
}{
{"https://192.168.1.1", false},
{"192.168.1.1", false},
{"http://storage.googleapis.com", false},
{"https://storage.googleapis.com", false},
{"storage.googleapis.com", false},
{"s3.amazonaws.com", false},
{"https://amazons3.amazonaws.com", false},
{"-192.168.1.1", false},
{"260.192.1.1", false},
// s3.amazonaws.com is not a valid Amazon S3 China end point.
{"https://s3.amazonaws.com", false},
// valid input.
{"https://s3.cn-north-1.amazonaws.com.cn", true},
}
for i, testCase := range testCases {
u, err := url.Parse(testCase.url)
if err != nil {
t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err)
}
result := IsAmazonChinaEndpoint(*u)
if testCase.result != result {
t.Errorf("Test %d: Expected isAmazonEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
}
}
}
// Tests validate Google Cloud end point validator.
func TestIsGoogleEndpoint(t *testing.T) {
testCases := []struct {
url string
// Expected result.
result bool
}{
{"192.168.1.1", false},
{"https://192.168.1.1", false},
{"s3.amazonaws.com", false},
{"http://s3.amazonaws.com", false},
{"https://s3.amazonaws.com", false},
{"https://s3.cn-north-1.amazonaws.com.cn", false},
{"-192.168.1.1", false},
{"260.192.1.1", false},
// valid inputs.
{"http://storage.googleapis.com", true},
{"https://storage.googleapis.com", true},
}
for i, testCase := range testCases {
u, err := url.Parse(testCase.url)
if err != nil {
t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err)
}
result := IsGoogleEndpoint(*u)
if testCase.result != result {
t.Errorf("Test %d: Expected isGoogleEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
}
}
}
func TestPercentEncodeSlash(t *testing.T) {
testCases := []struct {
input string
output string
}{
{"test123", "test123"},
{"abc,+_1", "abc,+_1"},
{"%40prefix=test%40123", "%40prefix=test%40123"},
{"key1=val1/val2", "key1=val1%2Fval2"},
{"%40prefix=test%40123/", "%40prefix=test%40123%2F"},
}
for i, testCase := range testCases {
receivedOutput := percentEncodeSlash(testCase.input)
if testCase.output != receivedOutput {
t.Errorf(
"Test %d: Input: \"%s\" --> Expected percentEncodeSlash to return \"%s\", but it returned \"%s\" instead!",
i+1, testCase.input, testCase.output,
receivedOutput,
)
}
}
}
// Tests validate the query encoder.
func TestQueryEncode(t *testing.T) {
testCases := []struct {
queryKey string
valueToEncode []string
// Expected result.
result string
}{
{"prefix", []string{"test@123", "test@456"}, "prefix=test%40123&prefix=test%40456"},
{"@prefix", []string{"test@123"}, "%40prefix=test%40123"},
{"@prefix", []string{"a/b/c/"}, "%40prefix=a%2Fb%2Fc%2F"},
{"prefix", []string{"test#123"}, "prefix=test%23123"},
{"prefix#", []string{"test#123"}, "prefix%23=test%23123"},
{"prefix", []string{"test123"}, "prefix=test123"},
{"prefix", []string{"test本語123", "test123"}, "prefix=test%E6%9C%AC%E8%AA%9E123&prefix=test123"},
}
for i, testCase := range testCases {
urlValues := make(url.Values)
for _, valueToEncode := range testCase.valueToEncode {
urlValues.Add(testCase.queryKey, valueToEncode)
}
result := QueryEncode(urlValues)
if testCase.result != result {
t.Errorf("Test %d: Expected queryEncode result to be \"%s\", but found it to be \"%s\" instead", i+1, testCase.result, result)
}
}
}
// Tests validate the URL path encoder.
func TestEncodePath(t *testing.T) {
testCases := []struct {
// Input.
inputStr string
// Expected result.
result string
}{
{"thisisthe%url", "thisisthe%25url"},
{"本語", "%E6%9C%AC%E8%AA%9E"},
{"本語.1", "%E6%9C%AC%E8%AA%9E.1"},
{">123", "%3E123"},
{"myurl#link", "myurl%23link"},
{"space in url", "space%20in%20url"},
{"url+path", "url%2Bpath"},
}
for i, testCase := range testCases {
result := EncodePath(testCase.inputStr)
if testCase.result != result {
t.Errorf("Test %d: Expected queryEncode result to be \"%s\", but found it to be \"%s\" instead", i+1, testCase.result, result)
}
}
}

View File

@ -25,8 +25,8 @@ import (
// StringSet - uses map as set of strings.
type StringSet map[string]struct{}
// keys - returns StringSet keys.
func (set StringSet) keys() []string {
// ToSlice - returns StringSet as string slice.
func (set StringSet) ToSlice() []string {
keys := make([]string, 0, len(set))
for k := range set {
keys = append(keys, k)
@ -141,7 +141,7 @@ func (set StringSet) Union(sset StringSet) StringSet {
// MarshalJSON - converts to JSON data.
func (set StringSet) MarshalJSON() ([]byte, error) {
return json.Marshal(set.keys())
return json.Marshal(set.ToSlice())
}
// UnmarshalJSON - parses JSON data and creates new set with it.
@ -169,7 +169,7 @@ func (set *StringSet) UnmarshalJSON(data []byte) error {
// String - returns printable string of the set.
func (set StringSet) String() string {
return fmt.Sprintf("%s", set.keys())
return fmt.Sprintf("%s", set.ToSlice())
}
// NewStringSet - creates new string set.
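A tiny usage sketch of the renamed ToSlice method; per the tests in the next file, the returned slice is sorted:

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/pkg/set"
)

func main() {
	// ToSlice replaces the unexported keys() and returns the set's
	// members as a string slice; the tests expect sorted output.
	s := set.CreateStringSet("foo", "bar")
	fmt.Println(s.ToSlice()) // [bar foo]
}
```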

View File

@ -17,6 +17,7 @@
package set
import (
"fmt"
"strings"
"testing"
)
@ -320,3 +321,27 @@ func TestStringSetString(t *testing.T) {
}
}
}
// StringSet.ToSlice() is called with series of cases for valid and erroneous inputs and the result is validated.
func TestStringSetToSlice(t *testing.T) {
testCases := []struct {
set StringSet
expectedResult string
}{
// Test empty set.
{NewStringSet(), `[]`},
// Test set with empty value.
{CreateStringSet(""), `[]`},
// Test set with value.
{CreateStringSet("foo"), `[foo]`},
// Test set with value.
{CreateStringSet("foo", "bar"), `[bar foo]`},
}
for _, testCase := range testCases {
sslice := testCase.set.ToSlice()
if str := fmt.Sprintf("%s", sslice); str != testCase.expectedResult {
t.Fatalf("expected: %s, got: %s", testCase.expectedResult, str)
}
}
}

View File

@ -149,6 +149,24 @@ func (p *PostPolicy) SetContentLengthRange(min, max int64) error {
return nil
}
// SetSuccessStatusAction - sets the success_action_status code to expect
// for this policy-based upload.
func (p *PostPolicy) SetSuccessStatusAction(status string) error {
if strings.TrimSpace(status) == "" {
return ErrInvalidArgument("Status is empty")
}
policyCond := policyCondition{
matchType: "eq",
condition: "$success_action_status",
value: status,
}
if err := p.addNewPolicy(policyCond); err != nil {
return err
}
p.formData["success_action_status"] = status
return nil
}
// addNewPolicy - internal helper to validate adding new policies.
func (p *PostPolicy) addNewPolicy(policyCond policyCondition) error {
if policyCond.matchType == "" || policyCond.condition == "" || policyCond.value == "" {
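A hedged sketch of the new success_action_status condition in context; NewPostPolicy, SetBucket, SetKey and SetExpires are assumed here from the minio-go PostPolicy API reference, only SetSuccessStatusAction comes from this hunk:

```go
package main

import (
	"log"
	"time"

	"github.com/minio/minio-go"
)

func main() {
	// Ask S3 to answer successful browser POST uploads with HTTP 201,
	// which returns an XML document instead of the default 204 No Content.
	p := minio.NewPostPolicy()
	if err := p.SetBucket("my-bucketname"); err != nil {
		log.Fatalln(err)
	}
	if err := p.SetKey("my-objectname"); err != nil {
		log.Fatalln(err)
	}
	if err := p.SetExpires(time.Now().UTC().Add(24 * time.Hour)); err != nil {
		log.Fatalln(err)
	}
	if err := p.SetSuccessStatusAction("201"); err != nil {
		log.Fatalln(err)
	}
	log.Println("post policy prepared:", p)
}
```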

View File

@ -0,0 +1,52 @@
package minio
import "time"
// newRetryTimerContinous creates a timer with exponentially increasing delays forever.
func (c Client) newRetryTimerContinous(unit time.Duration, cap time.Duration, jitter float64, doneCh chan struct{}) <-chan int {
attemptCh := make(chan int)
// normalize jitter to the range [0, 1.0]
if jitter < NoJitter {
jitter = NoJitter
}
if jitter > MaxJitter {
jitter = MaxJitter
}
// computes the exponential backoff duration according to
// https://www.awsarchitectureblog.com/2015/03/backoff.html
exponentialBackoffWait := func(attempt int) time.Duration {
// 1<<uint(attempt) below could overflow, so limit the value of attempt
maxAttempt := 30
if attempt > maxAttempt {
attempt = maxAttempt
}
// sleep = random_between(0, min(cap, base * 2 ** attempt))
sleep := unit * time.Duration(1<<uint(attempt))
if sleep > cap {
sleep = cap
}
if jitter != NoJitter {
sleep -= time.Duration(c.random.Float64() * float64(sleep) * jitter)
}
return sleep
}
go func() {
defer close(attemptCh)
var nextBackoff int
for {
select {
// Attempts starts.
case attemptCh <- nextBackoff:
nextBackoff++
case <-doneCh:
// Stop the routine.
return
}
time.Sleep(exponentialBackoffWait(nextBackoff))
}
}()
return attemptCh
}
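To make the backoff arithmetic concrete, a standalone sketch of the same capped exponential backoff with jitter; the unit, cap and jitter constants are illustrative, not the library's defaults:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	const (
		unit     = 200 * time.Millisecond // base delay
		maxSleep = 5 * time.Second        // cap on any single delay
		jitter   = 0.5                    // fraction of the delay randomized away
	)
	rnd := rand.New(rand.NewSource(42))
	for attempt := 0; attempt < 8; attempt++ {
		// sleep = min(cap, unit * 2^attempt), then subtract up to jitter*sleep.
		sleep := unit * time.Duration(1<<uint(attempt))
		if sleep > maxSleep {
			sleep = maxSleep
		}
		sleep -= time.Duration(rnd.Float64() * float64(sleep) * jitter)
		fmt.Printf("attempt %d: wait %v\n", attempt, sleep)
	}
}
```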

View File

@ -20,9 +20,12 @@ package minio
// "cn-north-1" adds support for AWS China.
var awsS3EndpointMap = map[string]string{
"us-east-1": "s3.amazonaws.com",
"us-east-2": "s3-us-east-2.amazonaws.com",
"us-west-2": "s3-us-west-2.amazonaws.com",
"us-west-1": "s3-us-west-1.amazonaws.com",
"ca-central-1": "s3.ca-central-1.amazonaws.com",
"eu-west-1": "s3-eu-west-1.amazonaws.com",
"eu-west-2": "s3-eu-west-2.amazonaws.com",
"eu-central-1": "s3-eu-central-1.amazonaws.com",
"ap-south-1": "s3-ap-south-1.amazonaws.com",
"ap-southeast-1": "s3-ap-southeast-1.amazonaws.com",

View File

@ -21,6 +21,7 @@ import (
"encoding/xml"
"io/ioutil"
"net/http"
"strconv"
)
// Contains common used utilities for tests.
@ -62,3 +63,12 @@ func encodeResponse(response interface{}) []byte {
encode.Encode(response)
return bytesBuffer.Bytes()
}
// mustParseBool converts a string to a bool, returning true if parsing fails.
func mustParseBool(str string) bool {
b, err := strconv.ParseBool(str)
if err != nil {
return true
}
return b
}

View File

@ -17,11 +17,8 @@
package minio
import (
"bytes"
"crypto/hmac"
"crypto/md5"
"crypto/sha256"
"encoding/hex"
"encoding/xml"
"io"
"io/ioutil"
@ -29,10 +26,11 @@ import (
"net/http"
"net/url"
"regexp"
"sort"
"strings"
"time"
"unicode/utf8"
"github.com/minio/minio-go/pkg/s3utils"
)
// xmlDecoder provide decoded value in xml.
@ -55,13 +53,6 @@ func sumMD5(data []byte) []byte {
return hash.Sum(nil)
}
// sumHMAC calculate hmac between two input byte array.
func sumHMAC(key []byte, data []byte) []byte {
hash := hmac.New(sha256.New, key)
hash.Write(data)
return hash.Sum(nil)
}
// getEndpointURL - construct a new endpoint.
func getEndpointURL(endpoint string, secure bool) (*url.URL, error) {
if strings.Contains(endpoint, ":") {
@ -69,12 +60,12 @@ func getEndpointURL(endpoint string, secure bool) (*url.URL, error) {
if err != nil {
return nil, err
}
if !isValidIP(host) && !isValidDomain(host) {
if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) {
msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
return nil, ErrInvalidArgument(msg)
}
} else {
if !isValidIP(endpoint) && !isValidDomain(endpoint) {
if !s3utils.IsValidIP(endpoint) && !s3utils.IsValidDomain(endpoint) {
msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards."
return nil, ErrInvalidArgument(msg)
}
@ -93,45 +84,12 @@ func getEndpointURL(endpoint string, secure bool) (*url.URL, error) {
}
// Validate incoming endpoint URL.
if err := isValidEndpointURL(endpointURL.String()); err != nil {
if err := isValidEndpointURL(*endpointURL); err != nil {
return nil, err
}
return endpointURL, nil
}
// isValidDomain validates if input string is a valid domain name.
func isValidDomain(host string) bool {
// See RFC 1035, RFC 3696.
host = strings.TrimSpace(host)
if len(host) == 0 || len(host) > 255 {
return false
}
// host cannot start or end with "-"
if host[len(host)-1:] == "-" || host[:1] == "-" {
return false
}
// host cannot start or end with "_"
if host[len(host)-1:] == "_" || host[:1] == "_" {
return false
}
// host cannot start or end with a "."
if host[len(host)-1:] == "." || host[:1] == "." {
return false
}
// All non alphanumeric characters are invalid.
if strings.ContainsAny(host, "`~!@#$%^&*()+={}[]|\\\"';:><?/") {
return false
}
// No need to regexp match, since the list is non-exhaustive.
// We let it valid and fail later.
return true
}
// isValidIP parses input string for ip address validity.
func isValidIP(ip string) bool {
return net.ParseIP(ip) != nil
}
// closeResponse close non nil response with any response Body.
// convenient wrapper to drain any remaining data on response body.
//
@ -152,92 +110,24 @@ func closeResponse(resp *http.Response) {
}
}
// isVirtualHostSupported - verifies if bucketName can be part of
// virtual host. Currently only Amazon S3 and Google Cloud Storage
// would support this.
func isVirtualHostSupported(endpointURL string, bucketName string) bool {
url, err := url.Parse(endpointURL)
if err != nil {
return false
}
// bucketName can be valid but '.' in the hostname will fail SSL
// certificate validation. So do not use host-style for such buckets.
if url.Scheme == "https" && strings.Contains(bucketName, ".") {
return false
}
// Return true for all other cases
return isAmazonEndpoint(endpointURL) || isGoogleEndpoint(endpointURL)
}
// Match if it is exactly Amazon S3 endpoint.
func isAmazonEndpoint(endpointURL string) bool {
if isAmazonChinaEndpoint(endpointURL) {
return true
}
url, err := url.Parse(endpointURL)
if err != nil {
return false
}
if url.Host == "s3.amazonaws.com" {
return true
}
return false
}
// Match if it is exactly Amazon S3 China endpoint.
// Customers who wish to use the new Beijing Region are required
// to sign up for a separate set of account credentials unique to
// the China (Beijing) Region. Customers with existing AWS credentials
// will not be able to access resources in the new Region, and vice versa.
// For more info https://aws.amazon.com/about-aws/whats-new/2013/12/18/announcing-the-aws-china-beijing-region/
func isAmazonChinaEndpoint(endpointURL string) bool {
if endpointURL == "" {
return false
}
url, err := url.Parse(endpointURL)
if err != nil {
return false
}
if url.Host == "s3.cn-north-1.amazonaws.com.cn" {
return true
}
return false
}
// Match if it is exactly Google cloud storage endpoint.
func isGoogleEndpoint(endpointURL string) bool {
if endpointURL == "" {
return false
}
url, err := url.Parse(endpointURL)
if err != nil {
return false
}
if url.Host == "storage.googleapis.com" {
return true
}
return false
}
// Sentinel URL is the default url value which is invalid.
var sentinelURL = url.URL{}
// Verify if input endpoint URL is valid.
func isValidEndpointURL(endpointURL string) error {
if endpointURL == "" {
func isValidEndpointURL(endpointURL url.URL) error {
if endpointURL == sentinelURL {
return ErrInvalidArgument("Endpoint url cannot be empty.")
}
url, err := url.Parse(endpointURL)
if err != nil {
if endpointURL.Path != "/" && endpointURL.Path != "" {
return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.")
}
if url.Path != "/" && url.Path != "" {
return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.")
}
if strings.Contains(endpointURL, ".amazonaws.com") {
if !isAmazonEndpoint(endpointURL) {
if strings.Contains(endpointURL.Host, ".amazonaws.com") {
if !s3utils.IsAmazonEndpoint(endpointURL) {
return ErrInvalidArgument("Amazon S3 endpoint should be 's3.amazonaws.com'.")
}
}
if strings.Contains(endpointURL, ".googleapis.com") {
if !isGoogleEndpoint(endpointURL) {
if strings.Contains(endpointURL.Host, ".googleapis.com") {
if !s3utils.IsGoogleEndpoint(endpointURL) {
return ErrInvalidArgument("Google Cloud Storage endpoint should be 'storage.googleapis.com'.")
}
}
@ -260,6 +150,9 @@ func isValidExpiry(expires time.Duration) error {
// style requests instead for such buckets.
var validBucketName = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
// Invalid bucket name with double dot.
var invalidDotBucketName = regexp.MustCompile(`\.\.`)
// isValidBucketName - verify bucket name in accordance with
// - http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html
func isValidBucketName(bucketName string) error {
@ -275,7 +168,7 @@ func isValidBucketName(bucketName string) error {
if bucketName[0] == '.' || bucketName[len(bucketName)-1] == '.' {
return ErrInvalidBucketName("Bucket name cannot start or end with a '.' dot.")
}
if match, _ := regexp.MatchString("\\.\\.", bucketName); match {
if invalidDotBucketName.MatchString(bucketName) {
return ErrInvalidBucketName("Bucket name cannot have successive periods.")
}
if !validBucketName.MatchString(bucketName) {
@ -310,67 +203,25 @@ func isValidObjectPrefix(objectPrefix string) error {
return nil
}
// queryEncode - encodes query values in their URL encoded form.
func queryEncode(v url.Values) string {
if v == nil {
return ""
// make a copy of http.Header
func cloneHeader(h http.Header) http.Header {
h2 := make(http.Header, len(h))
for k, vv := range h {
vv2 := make([]string, len(vv))
copy(vv2, vv)
h2[k] = vv2
}
var buf bytes.Buffer
keys := make([]string, 0, len(v))
for k := range v {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
vs := v[k]
prefix := urlEncodePath(k) + "="
for _, v := range vs {
if buf.Len() > 0 {
buf.WriteByte('&')
}
buf.WriteString(prefix)
buf.WriteString(urlEncodePath(v))
}
}
return buf.String()
return h2
}
// urlEncodePath encode the strings from UTF-8 byte representations to HTML hex escape sequences
//
// This is necessary since regular url.Parse() and url.Encode() functions do not support UTF-8
// non english characters cannot be parsed due to the nature in which url.Encode() is written
//
// This function on the other hand is a direct replacement for url.Encode() technique to support
// pretty much every UTF-8 character.
func urlEncodePath(pathName string) string {
// if object matches reserved string, no need to encode them
reservedNames := regexp.MustCompile("^[a-zA-Z0-9-_.~/]+$")
if reservedNames.MatchString(pathName) {
return pathName
// Filter relevant response headers from
// the HEAD, GET http response. The function takes
// a list of headers which are filtered out and
// returned as a new http header.
func filterHeader(header http.Header, filterKeys []string) (filteredHeader http.Header) {
filteredHeader = cloneHeader(header)
for _, key := range filterKeys {
filteredHeader.Del(key)
}
var encodedPathname string
for _, s := range pathName {
if 'A' <= s && s <= 'Z' || 'a' <= s && s <= 'z' || '0' <= s && s <= '9' { // §2.3 Unreserved characters (mark)
encodedPathname = encodedPathname + string(s)
continue
}
switch s {
case '-', '_', '.', '~', '/': // §2.3 Unreserved characters (mark)
encodedPathname = encodedPathname + string(s)
continue
default:
len := utf8.RuneLen(s)
if len < 0 {
// if utf8 cannot convert return the same string as is
return pathName
}
u := make([]byte, len)
utf8.EncodeRune(u, s)
for _, r := range u {
hex := hex.EncodeToString([]byte{r})
encodedPathname = encodedPathname + "%" + strings.ToUpper(hex)
}
}
}
return encodedPathname
return filteredHeader
}

View File

@ -17,11 +17,27 @@ package minio
import (
"fmt"
"net/http"
"net/url"
"testing"
"time"
)
// Tests filter header function by filtering out
// some custom header keys.
func TestFilterHeader(t *testing.T) {
header := http.Header{}
header.Set("Content-Type", "binary/octet-stream")
header.Set("Content-Encoding", "gzip")
newHeader := filterHeader(header, []string{"Content-Type"})
if len(newHeader) > 1 {
t.Fatalf("Unexpected size of the returned header, should be 1, got %d", len(newHeader))
}
if newHeader.Get("Content-Encoding") != "gzip" {
t.Fatalf("Unexpected content-encoding value, expected 'gzip', got %s", newHeader.Get("Content-Encoding"))
}
}
// Tests for 'getEndpointURL(endpoint string, inSecure bool)'.
func TestGetEndpointURL(t *testing.T) {
testCases := []struct {
@ -74,35 +90,6 @@ func TestGetEndpointURL(t *testing.T) {
}
}
// Tests for 'isValidDomain(host string) bool'.
func TestIsValidDomain(t *testing.T) {
testCases := []struct {
// Input.
host string
// Expected result.
result bool
}{
{"s3.amazonaws.com", true},
{"s3.cn-north-1.amazonaws.com.cn", true},
{"s3.amazonaws.com_", false},
{"%$$$", false},
{"s3.amz.test.com", true},
{"s3.%%", false},
{"localhost", true},
{"-localhost", false},
{"", false},
{"\n \t", false},
{" ", false},
}
for i, testCase := range testCases {
result := isValidDomain(testCase.host)
if testCase.result != result {
t.Errorf("Test %d: Expected isValidDomain test to be '%v', but found '%v' instead", i+1, testCase.result, result)
}
}
}
// Tests validate end point validator.
func TestIsValidEndpointURL(t *testing.T) {
testCases := []struct {
@ -125,161 +112,33 @@ func TestIsValidEndpointURL(t *testing.T) {
}
for i, testCase := range testCases {
err := isValidEndpointURL(testCase.url)
var u url.URL
if testCase.url == "" {
u = sentinelURL
} else {
u1, err := url.Parse(testCase.url)
if err != nil {
t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err)
}
u = *u1
}
err := isValidEndpointURL(u)
if err != nil && testCase.shouldPass {
t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err.Error())
t.Errorf("Test %d: Expected to pass, but failed with: <ERROR> %s", i+1, err)
}
if err == nil && !testCase.shouldPass {
t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err.Error())
t.Errorf("Test %d: Expected to fail with <ERROR> \"%s\", but passed instead", i+1, testCase.err)
}
// Failed as expected, but does it fail for the expected reason.
if err != nil && !testCase.shouldPass {
if err.Error() != testCase.err.Error() {
t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err.Error(), err.Error())
t.Errorf("Test %d: Expected to fail with error \"%s\", but instead failed with error \"%s\" instead", i+1, testCase.err, err)
}
}
}
}
// Tests validate IP address validator.
func TestIsValidIP(t *testing.T) {
testCases := []struct {
// Input.
ip string
// Expected result.
result bool
}{
{"192.168.1.1", true},
{"192.168.1", false},
{"192.168.1.1.1", false},
{"-192.168.1.1", false},
{"260.192.1.1", false},
}
for i, testCase := range testCases {
result := isValidIP(testCase.ip)
if testCase.result != result {
t.Errorf("Test %d: Expected isValidIP to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.ip, result)
}
}
}
// Tests validate virtual host validator.
func TestIsVirtualHostSupported(t *testing.T) {
testCases := []struct {
url string
bucket string
// Expected result.
result bool
}{
{"https://s3.amazonaws.com", "my-bucket", true},
{"https://s3.cn-north-1.amazonaws.com.cn", "my-bucket", true},
{"https://s3.amazonaws.com", "my-bucket.", false},
{"https://amazons3.amazonaws.com", "my-bucket.", false},
{"https://storage.googleapis.com/", "my-bucket", true},
{"https://mystorage.googleapis.com/", "my-bucket", false},
}
for i, testCase := range testCases {
result := isVirtualHostSupported(testCase.url, testCase.bucket)
if testCase.result != result {
t.Errorf("Test %d: Expected isVirtualHostSupported to be '%v' for input url \"%s\" and bucket \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, testCase.bucket, result)
}
}
}
// Tests validate Amazon endpoint validator.
func TestIsAmazonEndpoint(t *testing.T) {
testCases := []struct {
url string
// Expected result.
result bool
}{
{"https://192.168.1.1", false},
{"192.168.1.1", false},
{"http://storage.googleapis.com", false},
{"https://storage.googleapis.com", false},
{"storage.googleapis.com", false},
{"s3.amazonaws.com", false},
{"https://amazons3.amazonaws.com", false},
{"-192.168.1.1", false},
{"260.192.1.1", false},
// valid inputs.
{"https://s3.amazonaws.com", true},
{"https://s3.cn-north-1.amazonaws.com.cn", true},
}
for i, testCase := range testCases {
result := isAmazonEndpoint(testCase.url)
if testCase.result != result {
t.Errorf("Test %d: Expected isAmazonEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
}
}
}
// Tests validate Amazon S3 China endpoint validator.
func TestIsAmazonChinaEndpoint(t *testing.T) {
testCases := []struct {
url string
// Expected result.
result bool
}{
{"https://192.168.1.1", false},
{"192.168.1.1", false},
{"http://storage.googleapis.com", false},
{"https://storage.googleapis.com", false},
{"storage.googleapis.com", false},
{"s3.amazonaws.com", false},
{"https://amazons3.amazonaws.com", false},
{"-192.168.1.1", false},
{"260.192.1.1", false},
// s3.amazonaws.com is not a valid Amazon S3 China end point.
{"https://s3.amazonaws.com", false},
// valid input.
{"https://s3.cn-north-1.amazonaws.com.cn", true},
}
for i, testCase := range testCases {
result := isAmazonChinaEndpoint(testCase.url)
if testCase.result != result {
t.Errorf("Test %d: Expected isAmazonEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
}
}
}
// Tests validate Google Cloud end point validator.
func TestIsGoogleEndpoint(t *testing.T) {
testCases := []struct {
url string
// Expected result.
result bool
}{
{"192.168.1.1", false},
{"https://192.168.1.1", false},
{"s3.amazonaws.com", false},
{"http://s3.amazonaws.com", false},
{"https://s3.amazonaws.com", false},
{"https://s3.cn-north-1.amazonaws.com.cn", false},
{"-192.168.1.1", false},
{"260.192.1.1", false},
// valid inputs.
{"http://storage.googleapis.com", true},
{"https://storage.googleapis.com", true},
}
for i, testCase := range testCases {
result := isGoogleEndpoint(testCase.url)
if testCase.result != result {
t.Errorf("Test %d: Expected isGoogleEndpoint to be '%v' for input \"%s\", but found it to be '%v' instead", i+1, testCase.result, testCase.url, result)
}
}
}
// Tests validate the expiry time validator.
func TestIsValidExpiry(t *testing.T) {
testCases := []struct {
@ -355,56 +214,3 @@ func TestIsValidBucketName(t *testing.T) {
}
}
// Tests validate the query encoder.
func TestQueryEncode(t *testing.T) {
testCases := []struct {
queryKey string
valueToEncode []string
// Expected result.
result string
}{
{"prefix", []string{"test@123", "test@456"}, "prefix=test%40123&prefix=test%40456"},
{"@prefix", []string{"test@123"}, "%40prefix=test%40123"},
{"prefix", []string{"test#123"}, "prefix=test%23123"},
{"prefix#", []string{"test#123"}, "prefix%23=test%23123"},
{"prefix", []string{"test123"}, "prefix=test123"},
{"prefix", []string{"test本語123", "test123"}, "prefix=test%E6%9C%AC%E8%AA%9E123&prefix=test123"},
}
for i, testCase := range testCases {
urlValues := make(url.Values)
for _, valueToEncode := range testCase.valueToEncode {
urlValues.Add(testCase.queryKey, valueToEncode)
}
result := queryEncode(urlValues)
if testCase.result != result {
t.Errorf("Test %d: Expected queryEncode result to be \"%s\", but found it to be \"%s\" instead", i+1, testCase.result, result)
}
}
}
// Tests validate the URL path encoder.
func TestUrlEncodePath(t *testing.T) {
testCases := []struct {
// Input.
inputStr string
// Expected result.
result string
}{
{"thisisthe%url", "thisisthe%25url"},
{"本語", "%E6%9C%AC%E8%AA%9E"},
{"本語.1", "%E6%9C%AC%E8%AA%9E.1"},
{">123", "%3E123"},
{"myurl#link", "myurl%23link"},
{"space in url", "space%20in%20url"},
{"url+path", "url%2Bpath"},
}
for i, testCase := range testCases {
result := urlEncodePath(testCase.inputStr)
if testCase.result != result {
t.Errorf("Test %d: Expected queryEncode result to be \"%s\", but found it to be \"%s\" instead", i+1, testCase.result, result)
}
}
}