Move to vendor

Signed-off-by: Olivier Gambier <olivier@docker.com>

commit 77e69b9cf3 (parent c8d8e7e357)
1268 changed files with 34 additions and 24 deletions

185  vendor/github.com/docker/goamz/LICENSE  (generated, vendored, new file)
@@ -0,0 +1,185 @@
This software is licensed under the LGPLv3, included below.

As a special exception to the GNU Lesser General Public License version 3
("LGPL3"), the copyright holders of this Library give you permission to
convey to a third party a Combined Work that links statically or dynamically
to this Library without providing any Minimal Corresponding Source or
Minimal Application Code as set out in 4d or providing the installation
information set out in section 4e, provided that you comply with the other
provisions of LGPL3 and provided that you meet, for the Application the
terms and conditions of the license(s) which apply to the Application.

Except as stated in this special exception, the provisions of LGPL3 will
continue to comply in full to this Library. If you modify this Library, you
may apply this exception to your version of this Library, but you are not
obliged to do so. If you do not wish to do so, delete this exception
statement from your version. This exception does not (and cannot) modify any
license terms which apply to the Application, with which you must still
comply.


                   GNU LESSER GENERAL PUBLIC LICENSE
                       Version 3, 29 June 2007

 Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.


  This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.

  0. Additional Definitions.

  As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.

  "The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.

  An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.

  A "Combined Work" is a work produced by combining or linking an
Application with the Library.  The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".

  The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.

  The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.

  1. Exception to Section 3 of the GNU GPL.

  You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.

  2. Conveying Modified Versions.

  If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:

   a) under this License, provided that you make a good faith effort to
   ensure that, in the event an Application does not supply the
   function or data, the facility still operates, and performs
   whatever part of its purpose remains meaningful, or

   b) under the GNU GPL, with none of the additional permissions of
   this License applicable to that copy.

  3. Object Code Incorporating Material from Library Header Files.

  The object code form of an Application may incorporate material from
a header file that is part of the Library.  You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:

   a) Give prominent notice with each copy of the object code that the
   Library is used in it and that the Library and its use are
   covered by this License.

   b) Accompany the object code with a copy of the GNU GPL and this license
   document.

  4. Combined Works.

  You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:

   a) Give prominent notice with each copy of the Combined Work that
   the Library is used in it and that the Library and its use are
   covered by this License.

   b) Accompany the Combined Work with a copy of the GNU GPL and this license
   document.

   c) For a Combined Work that displays copyright notices during
   execution, include the copyright notice for the Library among
   these notices, as well as a reference directing the user to the
   copies of the GNU GPL and this license document.

   d) Do one of the following:

       0) Convey the Minimal Corresponding Source under the terms of this
       License, and the Corresponding Application Code in a form
       suitable for, and under terms that permit, the user to
       recombine or relink the Application with a modified version of
       the Linked Version to produce a modified Combined Work, in the
       manner specified by section 6 of the GNU GPL for conveying
       Corresponding Source.

       1) Use a suitable shared library mechanism for linking with the
       Library.  A suitable mechanism is one that (a) uses at run time
       a copy of the Library already present on the user's computer
       system, and (b) will operate properly with a modified version
       of the Library that is interface-compatible with the Linked
       Version.

   e) Provide Installation Information, but only if you would otherwise
   be required to provide such information under section 6 of the
   GNU GPL, and only to the extent that such information is
   necessary to install and execute a modified version of the
   Combined Work produced by recombining or relinking the
   Application with a modified version of the Linked Version. (If
   you use option 4d0, the Installation Information must accompany
   the Minimal Corresponding Source and Corresponding Application
   Code. If you use option 4d1, you must provide the Installation
   Information in the manner specified by section 6 of the GNU GPL
   for conveying Corresponding Source.)

  5. Combined Libraries.

  You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:

   a) Accompany the combined library with a copy of the same work based
   on the Library, uncombined with any other library facilities,
   conveyed under the terms of this License.

   b) Give prominent notice with the combined library that part of it
   is a work based on the Library, and explaining where to find the
   accompanying uncombined form of the same work.

  6. Revised Versions of the GNU Lesser General Public License.

  The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.

  Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.

  If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.

74  vendor/github.com/docker/goamz/aws/attempt.go  (generated, vendored, new file)
@@ -0,0 +1,74 @@
package aws

import (
	"time"
)

// AttemptStrategy represents a strategy for waiting for an action
// to complete successfully. This is an internal type used by the
// implementation of other goamz packages.
type AttemptStrategy struct {
	Total time.Duration // total duration of attempt.
	Delay time.Duration // interval between each try in the burst.
	Min   int           // minimum number of retries; overrides Total
}

type Attempt struct {
	strategy AttemptStrategy
	last     time.Time
	end      time.Time
	force    bool
	count    int
}

// Start begins a new sequence of attempts for the given strategy.
func (s AttemptStrategy) Start() *Attempt {
	now := time.Now()
	return &Attempt{
		strategy: s,
		last:     now,
		end:      now.Add(s.Total),
		force:    true,
	}
}

// Next waits until it is time to perform the next attempt or returns
// false if it is time to stop trying.
func (a *Attempt) Next() bool {
	now := time.Now()
	sleep := a.nextSleep(now)
	if !a.force && !now.Add(sleep).Before(a.end) && a.strategy.Min <= a.count {
		return false
	}
	a.force = false
	if sleep > 0 && a.count > 0 {
		time.Sleep(sleep)
		now = time.Now()
	}
	a.count++
	a.last = now
	return true
}

func (a *Attempt) nextSleep(now time.Time) time.Duration {
	sleep := a.strategy.Delay - now.Sub(a.last)
	if sleep < 0 {
		return 0
	}
	return sleep
}

// HasNext returns whether another attempt will be made if the current
// one fails. If it returns true, the following call to Next is
// guaranteed to return true.
func (a *Attempt) HasNext() bool {
	if a.force || a.strategy.Min > a.count {
		return true
	}
	now := time.Now()
	if now.Add(a.nextSleep(now)).Before(a.end) {
		a.force = true
		return true
	}
	return false
}
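
A minimal usage sketch (not part of the vendored file) showing how calling code typically drives AttemptStrategy: each Next call sleeps Delay between tries and reports false once Total has elapsed, unless Min forces more retries. The strategy values and the doSomethingFlaky operation are illustrative assumptions, not defaults from this package.

package main

import (
	"fmt"
	"time"

	"github.com/docker/goamz/aws"
)

// doSomethingFlaky is a hypothetical operation standing in for a real AWS call.
func doSomethingFlaky() error { return fmt.Errorf("transient failure") }

func main() {
	// Illustrative values: keep retrying for up to 1s, pausing 250ms between tries.
	strategy := aws.AttemptStrategy{Total: time.Second, Delay: 250 * time.Millisecond}
	for attempt := strategy.Start(); attempt.Next(); {
		if err := doSomethingFlaky(); err == nil {
			return
		} else if !attempt.HasNext() {
			fmt.Println("giving up:", err)
		}
	}
}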

636  vendor/github.com/docker/goamz/aws/aws.go  (generated, vendored, new file)
@@ -0,0 +1,636 @@
//
// goamz - Go packages to interact with the Amazon Web Services.
//
// https://wiki.ubuntu.com/goamz
//
// Copyright (c) 2011 Canonical Ltd.
//
// Written by Gustavo Niemeyer <gustavo.niemeyer@canonical.com>
//
package aws

import (
	"encoding/json"
	"encoding/xml"
	"errors"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"net/url"
	"os"
	"os/user"
	"path"
	"regexp"
	"strings"
	"time"
)

// Regular expressions for INI files
var (
	iniSectionRegexp = regexp.MustCompile(`^\s*\[([^\[\]]+)\]\s*$`)
	iniSettingRegexp = regexp.MustCompile(`^\s*(.+?)\s*=\s*(.*\S)\s*$`)
)

// Defines the valid signers
const (
	V2Signature      = iota
	V4Signature      = iota
	Route53Signature = iota
)

// Defines the service endpoint and correct Signer implementation to use
// to sign requests for this endpoint
type ServiceInfo struct {
	Endpoint string
	Signer   uint
}

// Region defines the URLs where AWS services may be accessed.
//
// See http://goo.gl/d8BP1 for more details.
type Region struct {
	Name                   string // the canonical name of this region.
	EC2Endpoint            ServiceInfo
	S3Endpoint             string
	S3BucketEndpoint       string // Not needed by AWS S3. Use ${bucket} for bucket name.
	S3LocationConstraint   bool   // true if this region requires a LocationConstraint declaration.
	S3LowercaseBucket      bool   // true if the region requires bucket names to be lower case.
	SDBEndpoint            string
	SNSEndpoint            string
	SQSEndpoint            string
	SESEndpoint            string
	IAMEndpoint            string
	ELBEndpoint            string
	KMSEndpoint            string
	DynamoDBEndpoint       string
	CloudWatchServicepoint ServiceInfo
	AutoScalingEndpoint    string
	RDSEndpoint            ServiceInfo
	KinesisEndpoint        string
	STSEndpoint            string
	CloudFormationEndpoint string
	ElastiCacheEndpoint    string
}

var Regions = map[string]Region{
	APNortheast.Name:  APNortheast,
	APNortheast2.Name: APNortheast2,
	APSoutheast.Name:  APSoutheast,
	APSoutheast2.Name: APSoutheast2,
	EUCentral.Name:    EUCentral,
	EUWest.Name:       EUWest,
	USEast.Name:       USEast,
	USWest.Name:       USWest,
	USWest2.Name:      USWest2,
	USGovWest.Name:    USGovWest,
	SAEast.Name:       SAEast,
	CNNorth1.Name:     CNNorth1,
}

// Designates a signer interface suitable for signing AWS requests, params
// should be appropriately encoded for the request before signing.
//
// A signer should be initialized with Auth and the appropriate endpoint.
type Signer interface {
	Sign(method, path string, params map[string]string)
}

// An AWS Service interface with the API to query the AWS service
//
// Supplied as an easy way to mock out service calls during testing.
type AWSService interface {
	// Queries the AWS service at a given method/path with the params and
	// returns an http.Response and error
	Query(method, path string, params map[string]string) (*http.Response, error)
	// Builds an error given an XML payload in the http.Response, can be used
	// to process an error if the status code is not 200 for example.
	BuildError(r *http.Response) error
}

// Implements a Server Query/Post API to easily query AWS services and build
// errors when desired
type Service struct {
	service ServiceInfo
	signer  Signer
}

// Create a base set of params for an action
func MakeParams(action string) map[string]string {
	params := make(map[string]string)
	params["Action"] = action
	return params
}

// Create a new AWS server to handle making requests
func NewService(auth Auth, service ServiceInfo) (s *Service, err error) {
	var signer Signer
	switch service.Signer {
	case V2Signature:
		signer, err = NewV2Signer(auth, service)
	// case V4Signature:
	// 	signer, err = NewV4Signer(auth, service, Regions["eu-west-1"])
	default:
		err = fmt.Errorf("Unsupported signer for service")
	}
	if err != nil {
		return
	}
	s = &Service{service: service, signer: signer}
	return
}

func (s *Service) Query(method, path string, params map[string]string) (resp *http.Response, err error) {
	params["Timestamp"] = time.Now().UTC().Format(time.RFC3339)
	u, err := url.Parse(s.service.Endpoint)
	if err != nil {
		return nil, err
	}
	u.Path = path

	s.signer.Sign(method, path, params)
	if method == "GET" {
		u.RawQuery = multimap(params).Encode()
		resp, err = http.Get(u.String())
	} else if method == "POST" {
		resp, err = http.PostForm(u.String(), multimap(params))
	}

	return
}

func (s *Service) BuildError(r *http.Response) error {
	errors := ErrorResponse{}
	xml.NewDecoder(r.Body).Decode(&errors)
	var err Error
	err = errors.Errors
	err.RequestId = errors.RequestId
	err.StatusCode = r.StatusCode
	if err.Message == "" {
		err.Message = r.Status
	}
	return &err
}

type ServiceError interface {
	error
	ErrorCode() string
}

type ErrorResponse struct {
	Errors    Error  `xml:"Error"`
	RequestId string // A unique ID for tracking the request
}

type Error struct {
	StatusCode int
	Type       string
	Code       string
	Message    string
	RequestId  string
}

func (err *Error) Error() string {
	return fmt.Sprintf("Type: %s, Code: %s, Message: %s",
		err.Type, err.Code, err.Message,
	)
}

func (err *Error) ErrorCode() string {
	return err.Code
}

type Auth struct {
	AccessKey, SecretKey string
	token                string
	expiration           time.Time
}

func (a *Auth) Token() string {
	if a.token == "" {
		return ""
	}
	if time.Since(a.expiration) >= -30*time.Second { // in an ideal world this should be zero, assuming the instance is syncing its clock
		auth, err := GetAuth("", "", "", time.Time{})
		if err == nil {
			*a = auth
		}
	}
	return a.token
}

func (a *Auth) Expiration() time.Time {
	return a.expiration
}

// To be used with other APIs that return auth credentials such as STS
func NewAuth(accessKey, secretKey, token string, expiration time.Time) *Auth {
	return &Auth{
		AccessKey:  accessKey,
		SecretKey:  secretKey,
		token:      token,
		expiration: expiration,
	}
}

// ResponseMetadata
type ResponseMetadata struct {
	RequestId string // A unique ID for tracking the request
}

type BaseResponse struct {
	ResponseMetadata ResponseMetadata
}

var unreserved = make([]bool, 128)
var hex = "0123456789ABCDEF"

func init() {
	// RFC3986
	u := "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567890-_.~"
	for _, c := range u {
		unreserved[c] = true
	}
}

func multimap(p map[string]string) url.Values {
	q := make(url.Values, len(p))
	for k, v := range p {
		q[k] = []string{v}
	}
	return q
}

type credentials struct {
	Code            string
	LastUpdated     string
	Type            string
	AccessKeyId     string
	SecretAccessKey string
	Token           string
	Expiration      string
}

// GetMetaData retrieves instance metadata about the current machine.
//
// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html for more details.
func GetMetaData(path string) (contents []byte, err error) {
	c := http.Client{
		Transport: &http.Transport{
			Dial: func(netw, addr string) (net.Conn, error) {
				deadline := time.Now().Add(5 * time.Second)
				c, err := net.DialTimeout(netw, addr, time.Second*2)
				if err != nil {
					return nil, err
				}
				c.SetDeadline(deadline)
				return c, nil
			},
		},
	}

	url := "http://169.254.169.254/latest/meta-data/" + path

	resp, err := c.Get(url)
	if err != nil {
		return
	}
	defer resp.Body.Close()

	if resp.StatusCode != 200 {
		err = fmt.Errorf("Code %d returned for url %s", resp.StatusCode, url)
		return
	}

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return
	}
	return []byte(body), err
}

func GetRegion(regionName string) (region Region) {
	region = Regions[regionName]
	return
}

// GetInstanceCredentials creates an Auth based on the instance's role credentials.
// If the running instance is not in EC2 or does not have a valid IAM role, an error will be returned.
// For more info about setting up IAM roles, see http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
func GetInstanceCredentials() (cred credentials, err error) {
	credentialPath := "iam/security-credentials/"

	// Get the instance role
	role, err := GetMetaData(credentialPath)
	if err != nil {
		return
	}

	// Get the instance role credentials
	credentialJSON, err := GetMetaData(credentialPath + string(role))
	if err != nil {
		return
	}

	err = json.Unmarshal([]byte(credentialJSON), &cred)
	return
}

// GetAuth creates an Auth based on either passed in credentials,
// environment information or instance based role credentials.
func GetAuth(accessKey string, secretKey, token string, expiration time.Time) (auth Auth, err error) {
	// First try passed in credentials
	if accessKey != "" && secretKey != "" {
		return Auth{accessKey, secretKey, token, expiration}, nil
	}

	// Next try to get auth from the environment
	auth, err = EnvAuth()
	if err == nil {
		// Found auth, return
		return
	}

	// Next try getting auth from the instance role
	cred, err := GetInstanceCredentials()
	if err == nil {
		// Found auth, return
		auth.AccessKey = cred.AccessKeyId
		auth.SecretKey = cred.SecretAccessKey
		auth.token = cred.Token
		exptdate, err := time.Parse("2006-01-02T15:04:05Z", cred.Expiration)
		if err != nil {
			err = fmt.Errorf("Error Parsing expiration date: cred.Expiration :%s , error: %s \n", cred.Expiration, err)
		}
		auth.expiration = exptdate
		return auth, err
	}

	// Next try getting auth from the credentials file
	auth, err = CredentialFileAuth("", "", time.Minute*5)
	if err == nil {
		return
	}

	//err = errors.New("No valid AWS authentication found")
	err = fmt.Errorf("No valid AWS authentication found: %s", err)
	return auth, err
}

// EnvAuth creates an Auth based on environment information.
// The AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment
// variables are used.
func EnvAuth() (auth Auth, err error) {
	auth.AccessKey = os.Getenv("AWS_ACCESS_KEY_ID")
	if auth.AccessKey == "" {
		auth.AccessKey = os.Getenv("AWS_ACCESS_KEY")
	}

	auth.SecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY")
	if auth.SecretKey == "" {
		auth.SecretKey = os.Getenv("AWS_SECRET_KEY")
	}
	if auth.AccessKey == "" {
		err = errors.New("AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment")
	}
	if auth.SecretKey == "" {
		err = errors.New("AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment")
	}
	return
}

// CredentialFileAuth creates an Auth based on a credentials file. The file
// contains various authentication profiles for use with AWS.
//
// The credentials file, which is used by other AWS SDKs, is documented at
// http://blogs.aws.amazon.com/security/post/Tx3D6U6WSFGOK2H/A-New-and-Standardized-Way-to-Manage-Credentials-in-the-AWS-SDKs
func CredentialFileAuth(filePath string, profile string, expiration time.Duration) (auth Auth, err error) {
	if profile == "" {
		profile = os.Getenv("AWS_DEFAULT_PROFILE")
		if profile == "" {
			profile = os.Getenv("AWS_PROFILE")
			if profile == "" {
				profile = "default"
			}
		}
	}

	if filePath == "" {
		u, err := user.Current()
		if err != nil {
			return auth, err
		}

		filePath = path.Join(u.HomeDir, ".aws", "credentials")
	}

	// read the file, then parse the INI
	contents, err := ioutil.ReadFile(filePath)
	if err != nil {
		return
	}

	profiles := parseINI(string(contents))
	profileData, ok := profiles[profile]

	if !ok {
		err = errors.New("The credentials file did not contain the profile")
		return
	}

	keyId, ok := profileData["aws_access_key_id"]
	if !ok {
		err = errors.New("The credentials file did not contain required attribute aws_access_key_id")
		return
	}

	secretKey, ok := profileData["aws_secret_access_key"]
	if !ok {
		err = errors.New("The credentials file did not contain required attribute aws_secret_access_key")
		return
	}

	auth.AccessKey = keyId
	auth.SecretKey = secretKey

	if token, ok := profileData["aws_session_token"]; ok {
		auth.token = token
	}

	auth.expiration = time.Now().Add(expiration)

	return
}

// parseINI takes the contents of a credentials file and returns a map, whose keys
// are the various profiles, and whose values are maps of the settings for the
// profiles
func parseINI(fileContents string) map[string]map[string]string {
	profiles := make(map[string]map[string]string)

	lines := strings.Split(fileContents, "\n")

	var currentSection map[string]string
	for _, line := range lines {
		// remove comments, which start with a semi-colon
		if split := strings.Split(line, ";"); len(split) > 1 {
			line = split[0]
		}

		// check if the line is the start of a profile.
		//
		// for example:
		//     [default]
		//
		// otherwise, check for the proper setting
		//     property=value
		if sectMatch := iniSectionRegexp.FindStringSubmatch(line); len(sectMatch) == 2 {
			currentSection = make(map[string]string)
			profiles[sectMatch[1]] = currentSection
		} else if setMatch := iniSettingRegexp.FindStringSubmatch(line); len(setMatch) == 3 && currentSection != nil {
			currentSection[setMatch[1]] = setMatch[2]
		}
	}

	return profiles
}

// Encode takes a string and URI-encodes it in a way suitable
// to be used in AWS signatures.
func Encode(s string) string {
	encode := false
	for i := 0; i != len(s); i++ {
		c := s[i]
		if c > 127 || !unreserved[c] {
			encode = true
			break
		}
	}
	if !encode {
		return s
	}
	e := make([]byte, len(s)*3)
	ei := 0
	for i := 0; i != len(s); i++ {
		c := s[i]
		if c > 127 || !unreserved[c] {
			e[ei] = '%'
			e[ei+1] = hex[c>>4]
			e[ei+2] = hex[c&0xF]
			ei += 3
		} else {
			e[ei] = c
			ei += 1
		}
	}
	return string(e[:ei])
}

func dialTimeout(network, addr string) (net.Conn, error) {
	return net.DialTimeout(network, addr, time.Duration(2*time.Second))
}

func AvailabilityZone() string {
	transport := http.Transport{Dial: dialTimeout}
	client := http.Client{
		Transport: &transport,
	}
	resp, err := client.Get("http://169.254.169.254/latest/meta-data/placement/availability-zone")
	if err != nil {
		return "unknown"
	} else {
		defer resp.Body.Close()
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return "unknown"
		} else {
			return string(body)
		}
	}
}

func InstanceRegion() string {
	az := AvailabilityZone()
	if az == "unknown" {
		return az
	} else {
		region := az[:len(az)-1]
		return region
	}
}

func InstanceId() string {
	transport := http.Transport{Dial: dialTimeout}
	client := http.Client{
		Transport: &transport,
	}
	resp, err := client.Get("http://169.254.169.254/latest/meta-data/instance-id")
	if err != nil {
		return "unknown"
	} else {
		defer resp.Body.Close()
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return "unknown"
		} else {
			return string(body)
		}
	}
}

func InstanceType() string {
	transport := http.Transport{Dial: dialTimeout}
	client := http.Client{
		Transport: &transport,
	}
	resp, err := client.Get("http://169.254.169.254/latest/meta-data/instance-type")
	if err != nil {
		return "unknown"
	} else {
		defer resp.Body.Close()
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return "unknown"
		} else {
			return string(body)
		}
	}
}

func ServerLocalIp() string {
	transport := http.Transport{Dial: dialTimeout}
	client := http.Client{
		Transport: &transport,
	}
	resp, err := client.Get("http://169.254.169.254/latest/meta-data/local-ipv4")
	if err != nil {
		return "127.0.0.1"
	} else {
		defer resp.Body.Close()
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return "127.0.0.1"
		} else {
			return string(body)
		}
	}
}

func ServerPublicIp() string {
	transport := http.Transport{Dial: dialTimeout}
	client := http.Client{
		Transport: &transport,
	}
	resp, err := client.Get("http://169.254.169.254/latest/meta-data/public-ipv4")
	if err != nil {
		return "127.0.0.1"
	} else {
		defer resp.Body.Close()
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return "127.0.0.1"
		} else {
			return string(body)
		}
	}
}
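
A short sketch (not part of the vendored file) of the credential-resolution entry point above: GetAuth falls through explicit credentials, then the environment, then the EC2 instance role, then ~/.aws/credentials. Empty strings and a zero time ask it to search the whole chain; the region name used here is just an example.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/docker/goamz/aws"
)

func main() {
	// Empty strings and a zero time tell GetAuth to walk the fallback chain.
	auth, err := aws.GetAuth("", "", "", time.Time{})
	if err != nil {
		log.Fatal(err)
	}
	region := aws.GetRegion("us-east-1") // returns the zero-value Region for unknown names
	fmt.Println(auth.AccessKey != "", region.S3Endpoint)
}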

124  vendor/github.com/docker/goamz/aws/client.go  (generated, vendored, new file)
@@ -0,0 +1,124 @@
package aws

import (
	"math"
	"net"
	"net/http"
	"time"
)

type RetryableFunc func(*http.Request, *http.Response, error) bool
type WaitFunc func(try int)
type DeadlineFunc func() time.Time

type ResilientTransport struct {
	// Timeout is the maximum amount of time a dial will wait for
	// a connect to complete.
	//
	// The default is no timeout.
	//
	// With or without a timeout, the operating system may impose
	// its own earlier timeout. For instance, TCP timeouts are
	// often around 3 minutes.
	DialTimeout time.Duration

	// MaxTries, if non-zero, specifies the number of times we will retry on
	// failure. Retries are only attempted for temporary network errors or known
	// safe failures.
	MaxTries    int
	Deadline    DeadlineFunc
	ShouldRetry RetryableFunc
	Wait        WaitFunc
	transport   *http.Transport
}

// Convenience method for creating an http client
func NewClient(rt *ResilientTransport) *http.Client {
	rt.transport = &http.Transport{
		Dial: func(netw, addr string) (net.Conn, error) {
			c, err := net.DialTimeout(netw, addr, rt.DialTimeout)
			if err != nil {
				return nil, err
			}
			c.SetDeadline(rt.Deadline())
			return c, nil
		},
		Proxy: http.ProxyFromEnvironment,
	}
	// TODO: Would be nice if ResilientTransport allowed clients to initialize
	// with http.Transport attributes.
	return &http.Client{
		Transport: rt,
	}
}

var retryingTransport = &ResilientTransport{
	Deadline: func() time.Time {
		return time.Now().Add(5 * time.Second)
	},
	DialTimeout: 10 * time.Second,
	MaxTries:    3,
	ShouldRetry: awsRetry,
	Wait:        ExpBackoff,
}

// Exported default client
var RetryingClient = NewClient(retryingTransport)

func (t *ResilientTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	return t.tries(req)
}

// Retry a request a maximum of t.MaxTries times.
// We'll only retry if the proper criteria are met.
// If a wait function is specified, wait that amount of time
// in between requests.
func (t *ResilientTransport) tries(req *http.Request) (res *http.Response, err error) {
	for try := 0; try < t.MaxTries; try += 1 {
		res, err = t.transport.RoundTrip(req)

		if !t.ShouldRetry(req, res, err) {
			break
		}
		if res != nil {
			res.Body.Close()
		}
		if t.Wait != nil {
			t.Wait(try)
		}
	}

	return
}

func ExpBackoff(try int) {
	time.Sleep(100 * time.Millisecond *
		time.Duration(math.Exp2(float64(try))))
}

func LinearBackoff(try int) {
	time.Sleep(time.Duration(try*100) * time.Millisecond)
}

// Decide if we should retry a request.
// In general, the criteria for retrying a request are described here:
// http://docs.aws.amazon.com/general/latest/gr/api-retries.html
func awsRetry(req *http.Request, res *http.Response, err error) bool {
	retry := false

	// Retry if there's a temporary network error.
	if neterr, ok := err.(net.Error); ok {
		if neterr.Temporary() {
			retry = true
		}
	}

	// Retry if we get a 5xx series error.
	if res != nil {
		if res.StatusCode >= 500 && res.StatusCode < 600 {
			retry = true
		}
	}

	return retry
}
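
Because ResilientTransport implements http.RoundTripper (via RoundTrip above), the exported RetryingClient drops in wherever an *http.Client is expected. A minimal sketch (the URL is illustrative); note that tries re-issues the same *http.Request without rewinding its body, so this pattern is only safe for bodiless or replayable requests:

package main

import (
	"fmt"
	"log"

	"github.com/docker/goamz/aws"
)

func main() {
	// GETs are safe to retry; the client retries up to MaxTries on 5xx
	// responses or temporary network errors, backing off exponentially.
	resp, err := aws.RetryingClient.Get("https://s3.amazonaws.com/")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}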

289  vendor/github.com/docker/goamz/aws/regions.go  (generated, vendored, new file)
@@ -0,0 +1,289 @@
package aws

var USGovWest = Region{
	"us-gov-west-1",
	ServiceInfo{"https://ec2.us-gov-west-1.amazonaws.com", V2Signature},
	"https://s3-fips-us-gov-west-1.amazonaws.com",
	"",
	true,
	true,
	"",
	"https://sns.us-gov-west-1.amazonaws.com",
	"https://sqs.us-gov-west-1.amazonaws.com",
	"",
	"https://iam.us-gov.amazonaws.com",
	"https://elasticloadbalancing.us-gov-west-1.amazonaws.com",
	"",
	"https://dynamodb.us-gov-west-1.amazonaws.com",
	ServiceInfo{"https://monitoring.us-gov-west-1.amazonaws.com", V2Signature},
	"https://autoscaling.us-gov-west-1.amazonaws.com",
	ServiceInfo{"https://rds.us-gov-west-1.amazonaws.com", V2Signature},
	"",
	"https://sts.amazonaws.com",
	"https://cloudformation.us-gov-west-1.amazonaws.com",
	"",
}

var USEast = Region{
	"us-east-1",
	ServiceInfo{"https://ec2.us-east-1.amazonaws.com", V2Signature},
	"https://s3-external-1.amazonaws.com",
	"",
	false,
	false,
	"https://sdb.amazonaws.com",
	"https://sns.us-east-1.amazonaws.com",
	"https://sqs.us-east-1.amazonaws.com",
	"https://email.us-east-1.amazonaws.com",
	"https://iam.amazonaws.com",
	"https://elasticloadbalancing.us-east-1.amazonaws.com",
	"https://kms.us-east-1.amazonaws.com",
	"https://dynamodb.us-east-1.amazonaws.com",
	ServiceInfo{"https://monitoring.us-east-1.amazonaws.com", V2Signature},
	"https://autoscaling.us-east-1.amazonaws.com",
	ServiceInfo{"https://rds.us-east-1.amazonaws.com", V2Signature},
	"https://kinesis.us-east-1.amazonaws.com",
	"https://sts.amazonaws.com",
	"https://cloudformation.us-east-1.amazonaws.com",
	"https://elasticache.us-east-1.amazonaws.com",
}

var USWest = Region{
	"us-west-1",
	ServiceInfo{"https://ec2.us-west-1.amazonaws.com", V2Signature},
	"https://s3-us-west-1.amazonaws.com",
	"",
	true,
	true,
	"https://sdb.us-west-1.amazonaws.com",
	"https://sns.us-west-1.amazonaws.com",
	"https://sqs.us-west-1.amazonaws.com",
	"",
	"https://iam.amazonaws.com",
	"https://elasticloadbalancing.us-west-1.amazonaws.com",
	"https://kms.us-west-1.amazonaws.com",
	"https://dynamodb.us-west-1.amazonaws.com",
	ServiceInfo{"https://monitoring.us-west-1.amazonaws.com", V2Signature},
	"https://autoscaling.us-west-1.amazonaws.com",
	ServiceInfo{"https://rds.us-west-1.amazonaws.com", V2Signature},
	"https://kinesis.us-west-1.amazonaws.com",
	"https://sts.amazonaws.com",
	"https://cloudformation.us-west-1.amazonaws.com",
	"https://elasticache.us-west-1.amazonaws.com",
}

var USWest2 = Region{
	"us-west-2",
	ServiceInfo{"https://ec2.us-west-2.amazonaws.com", V2Signature},
	"https://s3-us-west-2.amazonaws.com",
	"",
	true,
	true,
	"https://sdb.us-west-2.amazonaws.com",
	"https://sns.us-west-2.amazonaws.com",
	"https://sqs.us-west-2.amazonaws.com",
	"https://email.us-west-2.amazonaws.com",
	"https://iam.amazonaws.com",
	"https://elasticloadbalancing.us-west-2.amazonaws.com",
	"https://kms.us-west-2.amazonaws.com",
	"https://dynamodb.us-west-2.amazonaws.com",
	ServiceInfo{"https://monitoring.us-west-2.amazonaws.com", V2Signature},
	"https://autoscaling.us-west-2.amazonaws.com",
	ServiceInfo{"https://rds.us-west-2.amazonaws.com", V2Signature},
	"https://kinesis.us-west-2.amazonaws.com",
	"https://sts.amazonaws.com",
	"https://cloudformation.us-west-2.amazonaws.com",
	"https://elasticache.us-west-2.amazonaws.com",
}

var EUWest = Region{
	"eu-west-1",
	ServiceInfo{"https://ec2.eu-west-1.amazonaws.com", V2Signature},
	"https://s3-eu-west-1.amazonaws.com",
	"",
	true,
	true,
	"https://sdb.eu-west-1.amazonaws.com",
	"https://sns.eu-west-1.amazonaws.com",
	"https://sqs.eu-west-1.amazonaws.com",
	"https://email.eu-west-1.amazonaws.com",
	"https://iam.amazonaws.com",
	"https://elasticloadbalancing.eu-west-1.amazonaws.com",
	"https://kms.eu-west-1.amazonaws.com",
	"https://dynamodb.eu-west-1.amazonaws.com",
	ServiceInfo{"https://monitoring.eu-west-1.amazonaws.com", V2Signature},
	"https://autoscaling.eu-west-1.amazonaws.com",
	ServiceInfo{"https://rds.eu-west-1.amazonaws.com", V2Signature},
	"https://kinesis.eu-west-1.amazonaws.com",
	"https://sts.amazonaws.com",
	"https://cloudformation.eu-west-1.amazonaws.com",
	"https://elasticache.eu-west-1.amazonaws.com",
}

var EUCentral = Region{
	"eu-central-1",
	ServiceInfo{"https://ec2.eu-central-1.amazonaws.com", V4Signature},
	"https://s3-eu-central-1.amazonaws.com",
	"",
	true,
	true,
	"https://sdb.eu-central-1.amazonaws.com",
	"https://sns.eu-central-1.amazonaws.com",
	"https://sqs.eu-central-1.amazonaws.com",
	"",
	"https://iam.amazonaws.com",
	"https://elasticloadbalancing.eu-central-1.amazonaws.com",
	"https://kms.eu-central-1.amazonaws.com",
	"https://dynamodb.eu-central-1.amazonaws.com",
	ServiceInfo{"https://monitoring.eu-central-1.amazonaws.com", V2Signature},
	"https://autoscaling.eu-central-1.amazonaws.com",
	ServiceInfo{"https://rds.eu-central-1.amazonaws.com", V2Signature},
	"https://kinesis.eu-central-1.amazonaws.com",
	"https://sts.amazonaws.com",
	"https://cloudformation.eu-central-1.amazonaws.com",
	"",
}

var APSoutheast = Region{
	"ap-southeast-1",
	ServiceInfo{"https://ec2.ap-southeast-1.amazonaws.com", V2Signature},
	"https://s3-ap-southeast-1.amazonaws.com",
	"",
	true,
	true,
	"https://sdb.ap-southeast-1.amazonaws.com",
	"https://sns.ap-southeast-1.amazonaws.com",
	"https://sqs.ap-southeast-1.amazonaws.com",
	"",
	"https://iam.amazonaws.com",
	"https://elasticloadbalancing.ap-southeast-1.amazonaws.com",
	"https://kms.ap-southeast-1.amazonaws.com",
	"https://dynamodb.ap-southeast-1.amazonaws.com",
	ServiceInfo{"https://monitoring.ap-southeast-1.amazonaws.com", V2Signature},
	"https://autoscaling.ap-southeast-1.amazonaws.com",
	ServiceInfo{"https://rds.ap-southeast-1.amazonaws.com", V2Signature},
	"https://kinesis.ap-southeast-1.amazonaws.com",
	"https://sts.amazonaws.com",
	"https://cloudformation.ap-southeast-1.amazonaws.com",
	"https://elasticache.ap-southeast-1.amazonaws.com",
}

var APSoutheast2 = Region{
	"ap-southeast-2",
	ServiceInfo{"https://ec2.ap-southeast-2.amazonaws.com", V2Signature},
	"https://s3-ap-southeast-2.amazonaws.com",
	"",
	true,
	true,
	"https://sdb.ap-southeast-2.amazonaws.com",
	"https://sns.ap-southeast-2.amazonaws.com",
	"https://sqs.ap-southeast-2.amazonaws.com",
	"",
	"https://iam.amazonaws.com",
	"https://elasticloadbalancing.ap-southeast-2.amazonaws.com",
	"https://kms.ap-southeast-2.amazonaws.com",
	"https://dynamodb.ap-southeast-2.amazonaws.com",
	ServiceInfo{"https://monitoring.ap-southeast-2.amazonaws.com", V2Signature},
	"https://autoscaling.ap-southeast-2.amazonaws.com",
	ServiceInfo{"https://rds.ap-southeast-2.amazonaws.com", V2Signature},
	"https://kinesis.ap-southeast-2.amazonaws.com",
	"https://sts.amazonaws.com",
	"https://cloudformation.ap-southeast-2.amazonaws.com",
	"https://elasticache.ap-southeast-2.amazonaws.com",
}

var APNortheast = Region{
	"ap-northeast-1",
	ServiceInfo{"https://ec2.ap-northeast-1.amazonaws.com", V2Signature},
	"https://s3-ap-northeast-1.amazonaws.com",
	"",
	true,
	true,
	"https://sdb.ap-northeast-1.amazonaws.com",
	"https://sns.ap-northeast-1.amazonaws.com",
	"https://sqs.ap-northeast-1.amazonaws.com",
	"",
	"https://iam.amazonaws.com",
	"https://elasticloadbalancing.ap-northeast-1.amazonaws.com",
	"https://kms.ap-northeast-1.amazonaws.com",
	"https://dynamodb.ap-northeast-1.amazonaws.com",
	ServiceInfo{"https://monitoring.ap-northeast-1.amazonaws.com", V2Signature},
	"https://autoscaling.ap-northeast-1.amazonaws.com",
	ServiceInfo{"https://rds.ap-northeast-1.amazonaws.com", V2Signature},
	"https://kinesis.ap-northeast-1.amazonaws.com",
	"https://sts.amazonaws.com",
	"https://cloudformation.ap-northeast-1.amazonaws.com",
	"https://elasticache.ap-northeast-1.amazonaws.com",
}

var APNortheast2 = Region{
	"ap-northeast-2",
	ServiceInfo{"https://ec2.ap-northeast-2.amazonaws.com", V2Signature},
	"https://s3-ap-northeast-2.amazonaws.com",
	"",
	true,
	true,
	"",
	"https://sns.ap-northeast-2.amazonaws.com",
	"https://sqs.ap-northeast-2.amazonaws.com",
	"",
	"https://iam.amazonaws.com",
	"https://elasticloadbalancing.ap-northeast-2.amazonaws.com",
	"https://kms.ap-northeast-2.amazonaws.com",
	"https://dynamodb.ap-northeast-2.amazonaws.com",
	ServiceInfo{"https://monitoring.ap-northeast-2.amazonaws.com", V2Signature},
	"https://autoscaling.ap-northeast-2.amazonaws.com",
	ServiceInfo{"https://rds.ap-northeast-2.amazonaws.com", V2Signature},
	"https://kinesis.ap-northeast-2.amazonaws.com",
	"https://sts.ap-northeast-2.amazonaws.com",
	"https://cloudformation.ap-northeast-2.amazonaws.com",
	"https://elasticache.ap-northeast-2.amazonaws.com",
}

var SAEast = Region{
	"sa-east-1",
	ServiceInfo{"https://ec2.sa-east-1.amazonaws.com", V2Signature},
	"https://s3-sa-east-1.amazonaws.com",
	"",
	true,
	true,
	"https://sdb.sa-east-1.amazonaws.com",
	"https://sns.sa-east-1.amazonaws.com",
	"https://sqs.sa-east-1.amazonaws.com",
	"",
	"https://iam.amazonaws.com",
	"https://elasticloadbalancing.sa-east-1.amazonaws.com",
	"https://kms.sa-east-1.amazonaws.com",
	"https://dynamodb.sa-east-1.amazonaws.com",
	ServiceInfo{"https://monitoring.sa-east-1.amazonaws.com", V2Signature},
	"https://autoscaling.sa-east-1.amazonaws.com",
	ServiceInfo{"https://rds.sa-east-1.amazonaws.com", V2Signature},
	"",
	"https://sts.amazonaws.com",
	"https://cloudformation.sa-east-1.amazonaws.com",
	"https://elasticache.sa-east-1.amazonaws.com",
}

var CNNorth1 = Region{
	"cn-north-1",
	ServiceInfo{"https://ec2.cn-north-1.amazonaws.com.cn", V2Signature},
	"https://s3.cn-north-1.amazonaws.com.cn",
	"",
	true,
	true,
	"",
	"https://sns.cn-north-1.amazonaws.com.cn",
	"https://sqs.cn-north-1.amazonaws.com.cn",
	"",
	"https://iam.cn-north-1.amazonaws.com.cn",
	"https://elasticloadbalancing.cn-north-1.amazonaws.com.cn",
	"",
	"https://dynamodb.cn-north-1.amazonaws.com.cn",
	ServiceInfo{"https://monitoring.cn-north-1.amazonaws.com.cn", V4Signature},
	"https://autoscaling.cn-north-1.amazonaws.com.cn",
	ServiceInfo{"https://rds.cn-north-1.amazonaws.com.cn", V4Signature},
	"",
	"https://sts.cn-north-1.amazonaws.com.cn",
	"",
	"",
}
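
These Region values are built with positional composite literals, so each must list every aws.Region field in declaration order, and an empty string means the service has no endpoint wired up for that region. A small lookup sketch (not part of the vendored file) that relies only on that convention:

package main

import (
	"fmt"

	"github.com/docker/goamz/aws"
)

func main() {
	// An empty endpoint string means the service is not defined for the region.
	for name, r := range aws.Regions {
		if r.SESEndpoint == "" {
			fmt.Println("SES endpoint not defined for", name)
		}
	}
}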

136  vendor/github.com/docker/goamz/aws/retry.go  (generated, vendored, new file)
@@ -0,0 +1,136 @@
package aws

import (
	"math/rand"
	"net"
	"net/http"
	"time"
)

const (
	maxDelay             = 20 * time.Second
	defaultScale         = 300 * time.Millisecond
	throttlingScale      = 500 * time.Millisecond
	throttlingScaleRange = throttlingScale / 4
	defaultMaxRetries    = 3
	dynamoDBScale        = 25 * time.Millisecond
	dynamoDBMaxRetries   = 10
)

// A RetryPolicy encapsulates a strategy for implementing client retries.
//
// Default implementations are provided which match the AWS SDKs.
type RetryPolicy interface {
	// ShouldRetry returns whether a client should retry a failed request.
	ShouldRetry(target string, r *http.Response, err error, numRetries int) bool

	// Delay returns the time a client should wait before issuing a retry.
	Delay(target string, r *http.Response, err error, numRetries int) time.Duration
}

// DefaultRetryPolicy implements the AWS SDK default retry policy.
//
// It will retry up to 3 times, and uses an exponential backoff with a scale
// factor of 300ms (300ms, 600ms, 1200ms). If the retry is because of
// throttling, the delay will also include some randomness.
//
// See https://github.com/aws/aws-sdk-java/blob/master/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L90.
type DefaultRetryPolicy struct {
}

// ShouldRetry implements the RetryPolicy ShouldRetry method.
func (policy DefaultRetryPolicy) ShouldRetry(target string, r *http.Response, err error, numRetries int) bool {
	return shouldRetry(r, err, numRetries, defaultMaxRetries)
}

// Delay implements the RetryPolicy Delay method.
func (policy DefaultRetryPolicy) Delay(target string, r *http.Response, err error, numRetries int) time.Duration {
	scale := defaultScale
	if err, ok := err.(*Error); ok && isThrottlingException(err) {
		scale = throttlingScale + time.Duration(rand.Int63n(int64(throttlingScaleRange)))
	}
	return exponentialBackoff(numRetries, scale)
}

// DynamoDBRetryPolicy implements the AWS SDK DynamoDB retry policy.
//
// It will retry up to 10 times, and uses an exponential backoff with a scale
// factor of 25ms (25ms, 50ms, 100ms, ...).
//
// See https://github.com/aws/aws-sdk-java/blob/master/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L103.
type DynamoDBRetryPolicy struct {
}

// ShouldRetry implements the RetryPolicy ShouldRetry method.
func (policy DynamoDBRetryPolicy) ShouldRetry(target string, r *http.Response, err error, numRetries int) bool {
	return shouldRetry(r, err, numRetries, dynamoDBMaxRetries)
}

// Delay implements the RetryPolicy Delay method.
func (policy DynamoDBRetryPolicy) Delay(target string, r *http.Response, err error, numRetries int) time.Duration {
	return exponentialBackoff(numRetries, dynamoDBScale)
}

// NeverRetryPolicy never retries requests and returns immediately on failure.
type NeverRetryPolicy struct {
}

// ShouldRetry implements the RetryPolicy ShouldRetry method.
func (policy NeverRetryPolicy) ShouldRetry(target string, r *http.Response, err error, numRetries int) bool {
	return false
}

// Delay implements the RetryPolicy Delay method.
func (policy NeverRetryPolicy) Delay(target string, r *http.Response, err error, numRetries int) time.Duration {
	return time.Duration(0)
}

// shouldRetry determines if we should retry the request.
//
// See http://docs.aws.amazon.com/general/latest/gr/api-retries.html.
func shouldRetry(r *http.Response, err error, numRetries int, maxRetries int) bool {
	// Once we've exceeded the max retry attempts, game over.
	if numRetries >= maxRetries {
		return false
	}

	// Always retry temporary network errors.
	if err, ok := err.(net.Error); ok && err.Temporary() {
		return true
	}

	// Always retry 5xx responses.
	if r != nil && r.StatusCode >= 500 {
		return true
	}

	// Always retry throttling exceptions.
	if err, ok := err.(ServiceError); ok && isThrottlingException(err) {
		return true
	}

	// Other classes of failures indicate a problem with the request. Retrying
	// won't help.
	return false
}

func exponentialBackoff(numRetries int, scale time.Duration) time.Duration {
	if numRetries < 0 {
		return time.Duration(0)
	}

	delay := (1 << uint(numRetries)) * scale
	if delay > maxDelay {
		return maxDelay
	}
	return delay
}

func isThrottlingException(err ServiceError) bool {
	switch err.ErrorCode() {
	case "Throttling", "ThrottlingException", "ProvisionedThroughputExceededException":
		return true
	default:
		return false
	}
}
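
A sketch (not part of the vendored file) of how a caller might drive a RetryPolicy around a request loop; this file only defines the policies, so the helper name and loop shape are illustrative assumptions. With DefaultRetryPolicy the successive delays are roughly 300ms, 600ms, and 1200ms before giving up after 3 retries:

package main

import (
	"net/http"
	"time"

	"github.com/docker/goamz/aws"
)

// fetchWithPolicy is a hypothetical helper showing one way to consult a
// RetryPolicy: ask ShouldRetry after each failure and sleep for Delay.
func fetchWithPolicy(url string) (*http.Response, error) {
	policy := aws.DefaultRetryPolicy{}
	for numRetries := 0; ; numRetries++ {
		resp, err := http.Get(url)
		if err == nil && resp.StatusCode < 500 {
			return resp, nil
		}
		if !policy.ShouldRetry(url, resp, err, numRetries) {
			return resp, err
		}
		if resp != nil {
			resp.Body.Close() // drop the failed response before retrying
		}
		time.Sleep(policy.Delay(url, resp, err, numRetries))
	}
}

func main() {
	if resp, err := fetchWithPolicy("https://sts.amazonaws.com/"); err == nil {
		resp.Body.Close()
	}
}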

472  vendor/github.com/docker/goamz/aws/sign.go  (generated, vendored, new file)
@@ -0,0 +1,472 @@
package aws
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// AWS specifies that the parameters in a signed request must
|
||||
// be provided in the natural order of the keys. This is distinct
|
||||
// from the natural order of the encoded value of key=value.
|
||||
// Percent and gocheck.Equals affect the sorting order.
|
||||
func EncodeSorted(values url.Values) string {
|
||||
// preallocate the arrays for perfomance
|
||||
keys := make([]string, 0, len(values))
|
||||
sarray := make([]string, 0, len(values))
|
||||
for k, _ := range values {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
for _, k := range keys {
|
||||
for _, v := range values[k] {
|
||||
sarray = append(sarray, Encode(k)+"="+Encode(v))
|
||||
}
|
||||
}
|
||||
|
||||
return strings.Join(sarray, "&")
|
||||
}
|
||||
|
||||
type V2Signer struct {
|
||||
auth Auth
|
||||
service ServiceInfo
|
||||
host string
|
||||
}
|
||||
|
||||
var b64 = base64.StdEncoding
|
||||
|
||||
func NewV2Signer(auth Auth, service ServiceInfo) (*V2Signer, error) {
|
||||
u, err := url.Parse(service.Endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &V2Signer{auth: auth, service: service, host: u.Host}, nil
|
||||
}
|
||||
|
||||
func (s *V2Signer) Sign(method, path string, params map[string]string) {
|
||||
params["AWSAccessKeyId"] = s.auth.AccessKey
|
||||
params["SignatureVersion"] = "2"
|
||||
params["SignatureMethod"] = "HmacSHA256"
|
||||
if s.auth.Token() != "" {
|
||||
params["SecurityToken"] = s.auth.Token()
|
||||
}
|
||||
// AWS specifies that the parameters in a signed request must
|
||||
// be provided in the natural order of the keys. This is distinct
|
||||
// from the natural order of the encoded value of key=value.
|
||||
// Percent and gocheck.Equals affect the sorting order.
|
||||
var keys, sarray []string
|
||||
for k, _ := range params {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
for _, k := range keys {
|
||||
sarray = append(sarray, Encode(k)+"="+Encode(params[k]))
|
||||
}
|
||||
joined := strings.Join(sarray, "&")
|
||||
payload := method + "\n" + s.host + "\n" + path + "\n" + joined
|
||||
hash := hmac.New(sha256.New, []byte(s.auth.SecretKey))
|
||||
hash.Write([]byte(payload))
|
||||
signature := make([]byte, b64.EncodedLen(hash.Size()))
|
||||
b64.Encode(signature, hash.Sum(nil))
|
||||
|
||||
params["Signature"] = string(signature)
|
||||
}
|
||||
|
||||
func (s *V2Signer) SignRequest(req *http.Request) error {
|
||||
req.ParseForm()
|
||||
req.Form.Set("AWSAccessKeyId", s.auth.AccessKey)
|
||||
req.Form.Set("SignatureVersion", "2")
|
||||
req.Form.Set("SignatureMethod", "HmacSHA256")
|
||||
if s.auth.Token() != "" {
|
||||
req.Form.Set("SecurityToken", s.auth.Token())
|
||||
}
|
||||
|
||||
payload := req.Method + "\n" + req.URL.Host + "\n" + req.URL.Path + "\n" + EncodeSorted(req.Form)
|
||||
hash := hmac.New(sha256.New, []byte(s.auth.SecretKey))
|
||||
hash.Write([]byte(payload))
|
||||
signature := make([]byte, b64.EncodedLen(hash.Size()))
|
||||
b64.Encode(signature, hash.Sum(nil))
|
||||
|
||||
req.Form.Set("Signature", string(signature))
|
||||
|
||||
req.URL.RawQuery = req.Form.Encode()
|
||||
|
||||
return nil
|
||||
}
|
||||

// Common date formats for signing requests.
const (
	ISO8601BasicFormat      = "20060102T150405Z"
	ISO8601BasicFormatShort = "20060102"
)

type Route53Signer struct {
	auth Auth
}

func NewRoute53Signer(auth Auth) *Route53Signer {
	return &Route53Signer{auth: auth}
}

// getHeaderAuthorize creates the authorization signature from the date
// stamp and the secret key.
func (s *Route53Signer) getHeaderAuthorize(message string) string {
	hmacSha256 := hmac.New(sha256.New, []byte(s.auth.SecretKey))
	hmacSha256.Write([]byte(message))
	cryptedString := hmacSha256.Sum(nil)

	return base64.StdEncoding.EncodeToString(cryptedString)
}

// Sign adds all the headers required by the AWS Route53 API to the
// request, including the authorization header.
func (s *Route53Signer) Sign(req *http.Request) {
	date := time.Now().UTC().Format(time.RFC1123)
	delete(req.Header, "Date")
	req.Header.Set("Date", date)

	authHeader := fmt.Sprintf("AWS3-HTTPS AWSAccessKeyId=%s,Algorithm=%s,Signature=%s",
		s.auth.AccessKey, "HmacSHA256", s.getHeaderAuthorize(date))

	req.Header.Set("Host", req.Host)
	req.Header.Set("X-Amzn-Authorization", authHeader)
	req.Header.Set("Content-Type", "application/xml")
	if s.auth.Token() != "" {
		req.Header.Set("X-Amz-Security-Token", s.auth.Token())
	}
}
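
// Illustrative sketch: with the AWS3-HTTPS scheme above, only the Date
// header value is HMAC'd, so the resulting request carries a header of the
// form (access key is a placeholder):
//
//	X-Amzn-Authorization: AWS3-HTTPS AWSAccessKeyId=AKID,Algorithm=HmacSHA256,Signature=<base64 HMAC of the Date header>
func exampleRoute53Sign(req *http.Request) {
	signer := NewRoute53Signer(Auth{AccessKey: "AKID", SecretKey: "SECRET"})
	signer.Sign(req)
}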

/*
The V4Signer encapsulates all of the functionality to sign a request with the AWS
Signature Version 4 Signing Process. (http://goo.gl/u1OWZz)
*/
type V4Signer struct {
	auth        Auth
	serviceName string
	region      Region
	// Add the x-amz-content-sha256 header
	IncludeXAmzContentSha256 bool
}

/*
NewV4Signer returns a new instance of a V4Signer capable of signing AWS requests.
*/
func NewV4Signer(auth Auth, serviceName string, region Region) *V4Signer {
	return &V4Signer{
		auth:                     auth,
		serviceName:              serviceName,
		region:                   region,
		IncludeXAmzContentSha256: false,
	}
}

/*
Sign signs a request according to the AWS Signature Version 4 Signing Process. (http://goo.gl/u1OWZz)

The signed request will include an "x-amz-date" header with a current timestamp if a valid "x-amz-date"
or "date" header was not available in the original request. In addition, AWS Signature Version 4 requires
the "host" header to be a signed header, therefore the Sign method will manually set a "host" header from
the request.Host.

The signed request will include a new "Authorization" header indicating that the request has been signed.

Any changes to the request after signing it will invalidate the signature.
*/
func (s *V4Signer) Sign(req *http.Request) {
	req.Header.Set("host", req.Host) // host header must be included as a signed header
	t := s.requestTime(req)          // Get request time

	payloadHash := ""

	if _, ok := req.Form["X-Amz-Expires"]; ok {
		// We are authenticating the request by using query params
		// (also known as pre-signing a URL, http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html)
		payloadHash = "UNSIGNED-PAYLOAD"
		req.Header.Del("x-amz-date")

		req.Form["X-Amz-SignedHeaders"] = []string{s.signedHeaders(req.Header)}
		req.Form["X-Amz-Algorithm"] = []string{"AWS4-HMAC-SHA256"}
		req.Form["X-Amz-Credential"] = []string{s.auth.AccessKey + "/" + s.credentialScope(t)}
		req.Form["X-Amz-Date"] = []string{t.Format(ISO8601BasicFormat)}
		req.URL.RawQuery = req.Form.Encode()
	} else {
		payloadHash = s.payloadHash(req)
		if s.IncludeXAmzContentSha256 {
			req.Header.Set("x-amz-content-sha256", payloadHash) // x-amz-content-sha256 contains the payload hash
		}
	}
	creq := s.canonicalRequest(req, payloadHash)      // Build canonical request
	sts := s.stringToSign(t, creq)                    // Build string to sign
	signature := s.signature(t, sts)                  // Calculate the AWS Signature Version 4
	auth := s.authorization(req.Header, t, signature) // Create Authorization header value

	if _, ok := req.Form["X-Amz-Expires"]; ok {
		req.Form["X-Amz-Signature"] = []string{signature}
	} else {
		req.Header.Set("Authorization", auth) // Add Authorization header to request
	}
}
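
// Minimal usage sketch (endpoint and credentials are placeholders; the
// region value is left to the caller): header-based V4 signing of an S3
// GET request.
func exampleV4Sign(region Region) error {
	signer := NewV4Signer(Auth{AccessKey: "AKID", SecretKey: "SECRET"}, "s3", region)
	req, err := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/key", nil)
	if err != nil {
		return err
	}
	signer.Sign(req) // adds host, x-amz-date and Authorization headers
	return nil
}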

func (s *V4Signer) SignRequest(req *http.Request) error {
	s.Sign(req)
	return nil
}

/*
requestTime method will parse the time from the request "x-amz-date" or "date" headers.
If the "x-amz-date" header is present, that will take priority over the "date" header.
If neither header is defined, or we are unable to parse either header as a valid date,
then we will create a new "x-amz-date" header with the current time.
*/
func (s *V4Signer) requestTime(req *http.Request) time.Time {

	// Get "x-amz-date" header
	date := req.Header.Get("x-amz-date")

	// Attempt to parse as ISO8601BasicFormat
	t, err := time.Parse(ISO8601BasicFormat, date)
	if err == nil {
		return t
	}

	// Attempt to parse as http.TimeFormat
	t, err = time.Parse(http.TimeFormat, date)
	if err == nil {
		req.Header.Set("x-amz-date", t.Format(ISO8601BasicFormat))
		return t
	}

	// Get "date" header
	date = req.Header.Get("date")

	// Attempt to parse as http.TimeFormat
	t, err = time.Parse(http.TimeFormat, date)
	if err == nil {
		return t
	}

	// Fall back to a header carrying the current time
	t = time.Now().UTC()
	req.Header.Set("x-amz-date", t.Format(ISO8601BasicFormat))
	return t
}

/*
canonicalRequest method creates the canonical request according to Task 1 of the AWS Signature Version 4 Signing Process. (http://goo.gl/eUUZ3S)

	CanonicalRequest =
		HTTPRequestMethod + '\n' +
		CanonicalURI + '\n' +
		CanonicalQueryString + '\n' +
		CanonicalHeaders + '\n' +
		SignedHeaders + '\n' +
		HexEncode(Hash(Payload))

payloadHash is optional; pass the empty string and it will be calculated from the request.
*/
func (s *V4Signer) canonicalRequest(req *http.Request, payloadHash string) string {
	if payloadHash == "" {
		payloadHash = s.payloadHash(req)
	}
	c := new(bytes.Buffer)
	fmt.Fprintf(c, "%s\n", req.Method)
	fmt.Fprintf(c, "%s\n", s.canonicalURI(req.URL))
	fmt.Fprintf(c, "%s\n", s.canonicalQueryString(req.URL))
	fmt.Fprintf(c, "%s\n\n", s.canonicalHeaders(req.Header))
	fmt.Fprintf(c, "%s\n", s.signedHeaders(req.Header))
	fmt.Fprintf(c, "%s", payloadHash)
	return c.String()
}

func (s *V4Signer) canonicalURI(u *url.URL) string {
	u = &url.URL{Path: u.Path}
	canonicalPath := u.String()

	slash := strings.HasSuffix(canonicalPath, "/")
	canonicalPath = path.Clean(canonicalPath)

	if canonicalPath == "" || canonicalPath == "." {
		canonicalPath = "/"
	}

	if canonicalPath != "/" && slash {
		canonicalPath += "/"
	}

	return canonicalPath
}

func (s *V4Signer) canonicalQueryString(u *url.URL) string {
	keyValues := make(map[string]string, len(u.Query()))
	keys := make([]string, len(u.Query()))

	keyI := 0
	for k, vs := range u.Query() {
		k = url.QueryEscape(k)

		a := make([]string, len(vs))
		for idx, v := range vs {
			v = url.QueryEscape(v)
			a[idx] = fmt.Sprintf("%s=%s", k, v)
		}

		keyValues[k] = strings.Join(a, "&")
		keys[keyI] = k
		keyI++
	}

	sort.Strings(keys)

	query := make([]string, len(keys))
	for idx, key := range keys {
		query[idx] = keyValues[key]
	}

	queryStr := strings.Join(query, "&")

	// AWS V4 signing requires that space characters be encoded as %20
	// instead of +. Go's url.QueryEscape, as well as url.Values.Encode(),
	// encodes a space as a + character. See:
	// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
	// https://github.com/golang/go/issues/4013
	// https://groups.google.com/forum/#!topic/golang-nuts/BB443qEjPIk

	return strings.Replace(queryStr, "+", "%20", -1)
}
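
// Illustrative sketch of the space-encoding pitfall handled above:
// url.QueryEscape uses form encoding, which is not what SigV4 expects.
func exampleSpaceEscaping() {
	escaped := url.QueryEscape("a b")                     // "a+b" (form encoding)
	canonical := strings.Replace(escaped, "+", "%20", -1) // "a%20b" (what SigV4 signs)
	fmt.Println(escaped, canonical)
}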

func (s *V4Signer) canonicalHeaders(h http.Header) string {
	i, a, lowerCase := 0, make([]string, len(h)), make(map[string][]string)

	for k, v := range h {
		lowerCase[strings.ToLower(k)] = v
	}

	var keys []string
	for k := range lowerCase {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	for _, k := range keys {
		v := lowerCase[k]
		for j, w := range v {
			v[j] = strings.Trim(w, " ")
		}
		sort.Strings(v)
		a[i] = strings.ToLower(k) + ":" + strings.Join(v, ",")
		i++
	}
	return strings.Join(a, "\n")
}

func (s *V4Signer) signedHeaders(h http.Header) string {
	i, a := 0, make([]string, len(h))
	for k := range h {
		a[i] = strings.ToLower(k)
		i++
	}
	sort.Strings(a)
	return strings.Join(a, ";")
}

func (s *V4Signer) payloadHash(req *http.Request) string {
	var b []byte
	if req.Body == nil {
		b = []byte("")
	} else {
		var err error
		b, err = ioutil.ReadAll(req.Body)
		if err != nil {
			// TODO: return this error instead of panicking.
			panic(err)
		}
	}
	req.Body = ioutil.NopCloser(bytes.NewBuffer(b))
	return s.hash(string(b))
}

/*
stringToSign method creates the string to sign according to Task 2 of the AWS Signature Version 4 Signing Process. (http://goo.gl/es1PAu)

	StringToSign =
		Algorithm + '\n' +
		RequestDate + '\n' +
		CredentialScope + '\n' +
		HexEncode(Hash(CanonicalRequest))
*/
func (s *V4Signer) stringToSign(t time.Time, creq string) string {
	w := new(bytes.Buffer)
	fmt.Fprint(w, "AWS4-HMAC-SHA256\n")
	fmt.Fprintf(w, "%s\n", t.Format(ISO8601BasicFormat))
	fmt.Fprintf(w, "%s\n", s.credentialScope(t))
	fmt.Fprintf(w, "%s", s.hash(creq))
	return w.String()
}

func (s *V4Signer) credentialScope(t time.Time) string {
	return fmt.Sprintf("%s/%s/%s/aws4_request", t.Format(ISO8601BasicFormatShort), s.region.Name, s.serviceName)
}

/*
signature method calculates the AWS Signature Version 4 according to Task 3 of the AWS Signature Version 4 Signing Process. (http://goo.gl/j0Yqe1)

	signature = HexEncode(HMAC(derived-signing-key, string-to-sign))
*/
func (s *V4Signer) signature(t time.Time, sts string) string {
	h := s.hmac(s.derivedKey(t), []byte(sts))
	return fmt.Sprintf("%x", h)
}

/*
derivedKey method derives a signing key to be used for signing a request.

	kSecret  = Your AWS Secret Access Key
	kDate    = HMAC("AWS4" + kSecret, Date)
	kRegion  = HMAC(kDate, Region)
	kService = HMAC(kRegion, Service)
	kSigning = HMAC(kService, "aws4_request")
*/
func (s *V4Signer) derivedKey(t time.Time) []byte {
	h := s.hmac([]byte("AWS4"+s.auth.SecretKey), []byte(t.Format(ISO8601BasicFormatShort)))
	h = s.hmac(h, []byte(s.region.Name))
	h = s.hmac(h, []byte(s.serviceName))
	h = s.hmac(h, []byte("aws4_request"))
	return h
}

/*
authorization method generates the authorization header value.
*/
func (s *V4Signer) authorization(header http.Header, t time.Time, signature string) string {
	w := new(bytes.Buffer)
	fmt.Fprint(w, "AWS4-HMAC-SHA256 ")
	fmt.Fprintf(w, "Credential=%s/%s, ", s.auth.AccessKey, s.credentialScope(t))
	fmt.Fprintf(w, "SignedHeaders=%s, ", s.signedHeaders(header))
	fmt.Fprintf(w, "Signature=%s", signature)
	return w.String()
}

// hash method calculates the sha256 hash for a given string
func (s *V4Signer) hash(in string) string {
	h := sha256.New()
	fmt.Fprintf(h, "%s", in)
	return fmt.Sprintf("%x", h.Sum(nil))
}

// hmac method calculates the sha256 hmac for a given slice of bytes
func (s *V4Signer) hmac(key, data []byte) []byte {
	h := hmac.New(sha256.New, key)
	h.Write(data)
	return h.Sum(nil)
}
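
// Minimal pre-signing sketch (credentials, bucket and key are
// placeholders): setting X-Amz-Expires in req.Form routes Sign through the
// query-string branch above, leaving the payload unsigned
// ("UNSIGNED-PAYLOAD"). The signature lands in req.Form, so it must be
// folded back into the URL afterwards.
func exampleV4Presign(region Region) (string, error) {
	signer := NewV4Signer(Auth{AccessKey: "AKID", SecretKey: "SECRET"}, "s3", region)
	req, err := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/key", nil)
	if err != nil {
		return "", err
	}
	req.Form = url.Values{"X-Amz-Expires": {"3600"}} // link valid for one hour
	signer.Sign(req)                                 // adds X-Amz-Signature and friends to req.Form
	req.URL.RawQuery = req.Form.Encode()             // fold the signature back into the URL
	return req.URL.String(), nil
}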
202 vendor/github.com/docker/goamz/s3/lifecycle.go generated vendored Normal file
@@ -0,0 +1,202 @@
package s3

import (
	"crypto/md5"
	"encoding/base64"
	"encoding/xml"
	"net/url"
	"strconv"
	"time"
)

// This file implements S3 bucket lifecycle configuration.
// See goo.gl/d0bbDf for details.

const (
	LifecycleRuleStatusEnabled  = "Enabled"
	LifecycleRuleStatusDisabled = "Disabled"
	LifecycleRuleDateFormat     = "2006-01-02"
	StorageClassGlacier         = "GLACIER"
)

type Expiration struct {
	Days *uint  `xml:"Days,omitempty"`
	Date string `xml:"Date,omitempty"`
}

// ParseDate returns Date as a time.Time.
func (r *Expiration) ParseDate() (time.Time, error) {
	return time.Parse(LifecycleRuleDateFormat, r.Date)
}

type Transition struct {
	Days         *uint  `xml:"Days,omitempty"`
	Date         string `xml:"Date,omitempty"`
	StorageClass string `xml:"StorageClass"`
}

// ParseDate returns Date as a time.Time.
func (r *Transition) ParseDate() (time.Time, error) {
	return time.Parse(LifecycleRuleDateFormat, r.Date)
}

type NoncurrentVersionExpiration struct {
	Days *uint `xml:"NoncurrentDays,omitempty"`
}

type NoncurrentVersionTransition struct {
	Days         *uint  `xml:"NoncurrentDays,omitempty"`
	StorageClass string `xml:"StorageClass"`
}

type LifecycleRule struct {
	ID                          string                       `xml:"ID"`
	Prefix                      string                       `xml:"Prefix"`
	Status                      string                       `xml:"Status"`
	NoncurrentVersionTransition *NoncurrentVersionTransition `xml:"NoncurrentVersionTransition,omitempty"`
	NoncurrentVersionExpiration *NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty"`
	Transition                  *Transition                  `xml:"Transition,omitempty"`
	Expiration                  *Expiration                  `xml:"Expiration,omitempty"`
}

// NewLifecycleRule creates a lifecycle rule with the arbitrary identifier
// id and the object key prefix to which the rule should apply.
func NewLifecycleRule(id, prefix string) *LifecycleRule {
	rule := &LifecycleRule{
		ID:     id,
		Prefix: prefix,
		Status: LifecycleRuleStatusEnabled,
	}
	return rule
}

// SetTransitionDays sets a transition rule in days. It overwrites any
// previous transition rule.
func (r *LifecycleRule) SetTransitionDays(days uint) {
	r.Transition = &Transition{
		Days:         &days,
		StorageClass: StorageClassGlacier,
	}
}

// SetTransitionDate sets a transition rule as a date. It overwrites any
// previous transition rule.
func (r *LifecycleRule) SetTransitionDate(date time.Time) {
	r.Transition = &Transition{
		Date:         date.Format(LifecycleRuleDateFormat),
		StorageClass: StorageClassGlacier,
	}
}

// SetExpirationDays sets an expiration rule in days. It overwrites any
// previous expiration rule. Days must be > 0.
func (r *LifecycleRule) SetExpirationDays(days uint) {
	r.Expiration = &Expiration{
		Days: &days,
	}
}

// SetExpirationDate sets an expiration rule as a date. It overwrites any
// previous expiration rule.
func (r *LifecycleRule) SetExpirationDate(date time.Time) {
	r.Expiration = &Expiration{
		Date: date.Format(LifecycleRuleDateFormat),
	}
}

// SetNoncurrentVersionTransitionDays sets a noncurrent version transition
// rule. It overwrites any previous noncurrent version transition rule.
func (r *LifecycleRule) SetNoncurrentVersionTransitionDays(days uint) {
	r.NoncurrentVersionTransition = &NoncurrentVersionTransition{
		Days:         &days,
		StorageClass: StorageClassGlacier,
	}
}

// SetNoncurrentVersionExpirationDays sets a noncurrent version expiration
// rule. Days must be > 0. It overwrites any previous noncurrent version
// expiration rule.
func (r *LifecycleRule) SetNoncurrentVersionExpirationDays(days uint) {
	r.NoncurrentVersionExpiration = &NoncurrentVersionExpiration{
		Days: &days,
	}
}

// Disable marks the rule as disabled.
func (r *LifecycleRule) Disable() {
	r.Status = LifecycleRuleStatusDisabled
}

// Enable marks the rule as enabled (the default).
func (r *LifecycleRule) Enable() {
	r.Status = LifecycleRuleStatusEnabled
}

type LifecycleConfiguration struct {
	XMLName xml.Name          `xml:"LifecycleConfiguration"`
	Rules   *[]*LifecycleRule `xml:"Rule,omitempty"`
}

// AddRule adds a LifecycleRule to the configuration.
func (c *LifecycleConfiguration) AddRule(r *LifecycleRule) {
	var rules []*LifecycleRule
	if c.Rules != nil {
		rules = *c.Rules
	}
	rules = append(rules, r)
	c.Rules = &rules
}

// PutLifecycleConfiguration sets the bucket's lifecycle configuration.
func (b *Bucket) PutLifecycleConfiguration(c *LifecycleConfiguration) error {
	doc, err := xml.Marshal(c)
	if err != nil {
		return err
	}

	buf := makeXmlBuffer(doc)
	digest := md5.New()
	size, err := digest.Write(buf.Bytes())
	if err != nil {
		return err
	}

	headers := map[string][]string{
		"Content-Length": {strconv.FormatInt(int64(size), 10)},
		"Content-MD5":    {base64.StdEncoding.EncodeToString(digest.Sum(nil))},
	}

	req := &request{
		path:    "/",
		method:  "PUT",
		bucket:  b.Name,
		headers: headers,
		payload: buf,
		params:  url.Values{"lifecycle": {""}},
	}

	return b.S3.queryV4Sign(req, nil)
}

// GetLifecycleConfiguration retrieves the lifecycle configuration for the
// bucket. AWS returns an error if no lifecycle configuration is found.
func (b *Bucket) GetLifecycleConfiguration() (*LifecycleConfiguration, error) {
	req := &request{
		method: "GET",
		bucket: b.Name,
		path:   "/",
		params: url.Values{"lifecycle": {""}},
	}

	conf := &LifecycleConfiguration{}
	err := b.S3.queryV4Sign(req, conf)
	return conf, err
}

// DeleteLifecycleConfiguration deletes the bucket's lifecycle configuration.
func (b *Bucket) DeleteLifecycleConfiguration() error {
	req := &request{
		method: "DELETE",
		bucket: b.Name,
		path:   "/",
		params: url.Values{"lifecycle": {""}},
	}

	return b.S3.queryV4Sign(req, nil)
}
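
// Minimal usage sketch (rule name, prefix and day counts are
// placeholders): archive objects under "logs/" to Glacier after 30 days
// and expire them after 365.
func exampleLifecycle(b *Bucket) error {
	rule := NewLifecycleRule("archive-logs", "logs/")
	rule.SetTransitionDays(30)
	rule.SetExpirationDays(365)

	conf := &LifecycleConfiguration{}
	conf.AddRule(rule)
	return b.PutLifecycleConfiguration(conf)
}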
508 vendor/github.com/docker/goamz/s3/multi.go generated vendored Normal file
@@ -0,0 +1,508 @@
package s3

import (
	"bytes"
	"crypto/md5"
	"encoding/base64"
	"encoding/hex"
	"encoding/xml"
	"errors"
	"io"
	"net/http"
	"net/url"
	"sort"
	"strconv"
	"strings"
)

// Multi represents an unfinished multipart upload.
//
// Multipart uploads allow sending big objects in smaller chunks.
// After all parts have been sent, the upload must be explicitly
// completed by calling Complete with the list of parts.
//
// See http://goo.gl/vJfTG for an overview of multipart uploads.
type Multi struct {
	Bucket   *Bucket
	Key      string
	UploadId string
}

// That's the default. Here just for testing.
var listMultiMax = 1000

type listMultiResp struct {
	NextKeyMarker      string
	NextUploadIdMarker string
	IsTruncated        bool
	Upload             []Multi
	CommonPrefixes     []string `xml:"CommonPrefixes>Prefix"`
}

// ListMulti returns the list of unfinished multipart uploads in b.
//
// The prefix parameter limits the response to keys that begin with the
// specified prefix. You can use prefixes to separate a bucket into different
// groupings of keys (to get the feeling of folders, for example).
//
// The delim parameter causes the response to group all of the keys that
// share a common prefix up to the next delimiter in a single entry within
// the CommonPrefixes field. You can use delimiters to separate a bucket
// into different groupings of keys, similar to how folders would work.
//
// See http://goo.gl/ePioY for details.
func (b *Bucket) ListMulti(prefix, delim string) (multis []*Multi, prefixes []string, err error) {
	params := map[string][]string{
		"uploads":     {""},
		"max-uploads": {strconv.FormatInt(int64(listMultiMax), 10)},
		"prefix":      {prefix},
		"delimiter":   {delim},
	}
	for attempt := attempts.Start(); attempt.Next(); {
		req := &request{
			method: "GET",
			bucket: b.Name,
			params: params,
		}
		var resp listMultiResp
		err := b.S3.query(req, &resp)
		if shouldRetry(err) && attempt.HasNext() {
			continue
		}
		if err != nil {
			return nil, nil, err
		}
		for i := range resp.Upload {
			multi := &resp.Upload[i]
			multi.Bucket = b
			multis = append(multis, multi)
		}
		prefixes = append(prefixes, resp.CommonPrefixes...)
		if !resp.IsTruncated {
			return multis, prefixes, nil
		}
		params["key-marker"] = []string{resp.NextKeyMarker}
		params["upload-id-marker"] = []string{resp.NextUploadIdMarker}
		attempt = attempts.Start() // Last request worked.
	}
	panic("unreachable")
}

// Multi returns a multipart upload handler for the provided key
// inside b. If a multipart upload exists for key, it is returned,
// otherwise a new multipart upload is initiated with contType and perm.
func (b *Bucket) Multi(key, contType string, perm ACL, options Options) (*Multi, error) {
	multis, _, err := b.ListMulti(key, "")
	if err != nil && !hasCode(err, "NoSuchUpload") {
		return nil, err
	}
	for _, m := range multis {
		if m.Key == key {
			return m, nil
		}
	}
	return b.InitMulti(key, contType, perm, options)
}

// InitMulti initializes a new multipart upload at the provided
// key inside b and returns a value for manipulating it.
//
// See http://goo.gl/XP8kL for details.
func (b *Bucket) InitMulti(key string, contType string, perm ACL, options Options) (*Multi, error) {
	headers := map[string][]string{
		"Content-Type":   {contType},
		"Content-Length": {"0"},
		"x-amz-acl":      {string(perm)},
	}
	options.addHeaders(headers)
	params := map[string][]string{
		"uploads": {""},
	}
	req := &request{
		method:  "POST",
		bucket:  b.Name,
		path:    key,
		headers: headers,
		params:  params,
	}
	var err error
	var resp struct {
		UploadId string `xml:"UploadId"`
	}
	for attempt := attempts.Start(); attempt.Next(); {
		err = b.S3.query(req, &resp)
		if !shouldRetry(err) {
			break
		}
	}
	if err != nil {
		return nil, err
	}
	return &Multi{Bucket: b, Key: key, UploadId: resp.UploadId}, nil
}

// PutPartCopy copies part n of the multipart upload from the existing
// object named by source.
func (m *Multi) PutPartCopy(n int, options CopyOptions, source string) (*CopyObjectResult, Part, error) {
	headers := map[string][]string{
		"x-amz-copy-source": {url.QueryEscape(source)},
	}
	options.addHeaders(headers)
	params := map[string][]string{
		"uploadId":   {m.UploadId},
		"partNumber": {strconv.FormatInt(int64(n), 10)},
	}

	sourceBucket := m.Bucket.S3.Bucket(strings.TrimRight(strings.SplitAfterN(source, "/", 2)[0], "/"))
	sourceMeta, err := sourceBucket.Head(strings.SplitAfterN(source, "/", 2)[1], nil)
	if err != nil {
		return nil, Part{}, err
	}

	for attempt := attempts.Start(); attempt.Next(); {
		req := &request{
			method:  "PUT",
			bucket:  m.Bucket.Name,
			path:    m.Key,
			headers: headers,
			params:  params,
		}
		resp := &CopyObjectResult{}
		err = m.Bucket.S3.query(req, resp)
		if shouldRetry(err) && attempt.HasNext() {
			continue
		}
		if err != nil {
			return nil, Part{}, err
		}
		if resp.ETag == "" {
			return nil, Part{}, errors.New("part upload succeeded with no ETag")
		}
		return resp, Part{n, resp.ETag, sourceMeta.ContentLength}, nil
	}
	panic("unreachable")
}

// PutPart sends part n of the multipart upload, reading all the content from r.
// Each part, except for the last one, must be at least 5MB in size.
//
// See http://goo.gl/pqZer for details.
func (m *Multi) PutPart(n int, r io.ReadSeeker) (Part, error) {
	partSize, _, md5b64, err := seekerInfo(r)
	if err != nil {
		return Part{}, err
	}
	return m.putPart(n, r, partSize, md5b64)
}

func (m *Multi) putPart(n int, r io.ReadSeeker, partSize int64, md5b64 string) (Part, error) {
	headers := map[string][]string{
		"Content-Length": {strconv.FormatInt(partSize, 10)},
		"Content-MD5":    {md5b64},
	}
	params := map[string][]string{
		"uploadId":   {m.UploadId},
		"partNumber": {strconv.FormatInt(int64(n), 10)},
	}
	for attempt := attempts.Start(); attempt.Next(); {
		_, err := r.Seek(0, 0)
		if err != nil {
			return Part{}, err
		}
		req := &request{
			method:  "PUT",
			bucket:  m.Bucket.Name,
			path:    m.Key,
			headers: headers,
			params:  params,
			payload: r,
		}
		err = m.Bucket.S3.prepare(req)
		if err != nil {
			return Part{}, err
		}
		resp, err := m.Bucket.S3.run(req, nil)
		if shouldRetry(err) && attempt.HasNext() {
			continue
		}
		if err != nil {
			return Part{}, err
		}
		etag := resp.Header.Get("ETag")
		if etag == "" {
			return Part{}, errors.New("part upload succeeded with no ETag")
		}
		return Part{n, etag, partSize}, nil
	}
	panic("unreachable")
}

// seekerInfo returns the size and the MD5 checksum (hex- and
// base64-encoded) of the data readable from r.
func seekerInfo(r io.ReadSeeker) (size int64, md5hex string, md5b64 string, err error) {
	_, err = r.Seek(0, 0)
	if err != nil {
		return 0, "", "", err
	}
	digest := md5.New()
	size, err = io.Copy(digest, r)
	if err != nil {
		return 0, "", "", err
	}
	sum := digest.Sum(nil)
	md5hex = hex.EncodeToString(sum)
	md5b64 = base64.StdEncoding.EncodeToString(sum)
	return size, md5hex, md5b64, nil
}

type Part struct {
	N    int `xml:"PartNumber"`
	ETag string
	Size int64
}

type partSlice []Part

func (s partSlice) Len() int           { return len(s) }
func (s partSlice) Less(i, j int) bool { return s[i].N < s[j].N }
func (s partSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

type listPartsResp struct {
	NextPartNumberMarker string
	IsTruncated          bool
	Part                 []Part
}

// That's the default. Here just for testing.
var listPartsMax = 1000

// ListParts is kept for backwards compatibility. See the documentation for
// ListPartsFull.
func (m *Multi) ListParts() ([]Part, error) {
	return m.ListPartsFull(0, listPartsMax)
}

// ListPartsFull returns the list of previously uploaded parts in m,
// ordered by part number (only parts with higher part numbers than
// partNumberMarker will be listed). Only up to maxParts parts will be
// returned.
//
// See http://goo.gl/ePioY for details.
func (m *Multi) ListPartsFull(partNumberMarker int, maxParts int) ([]Part, error) {
	if maxParts > listPartsMax {
		maxParts = listPartsMax
	}

	params := map[string][]string{
		"uploadId":           {m.UploadId},
		"max-parts":          {strconv.FormatInt(int64(maxParts), 10)},
		"part-number-marker": {strconv.FormatInt(int64(partNumberMarker), 10)},
	}
	var parts partSlice
	for attempt := attempts.Start(); attempt.Next(); {
		req := &request{
			method: "GET",
			bucket: m.Bucket.Name,
			path:   m.Key,
			params: params,
		}
		var resp listPartsResp
		err := m.Bucket.S3.query(req, &resp)
		if shouldRetry(err) && attempt.HasNext() {
			continue
		}
		if err != nil {
			return nil, err
		}
		parts = append(parts, resp.Part...)
		if !resp.IsTruncated {
			sort.Sort(parts)
			return parts, nil
		}
		params["part-number-marker"] = []string{resp.NextPartNumberMarker}
		attempt = attempts.Start() // Last request worked.
	}
	panic("unreachable")
}

type ReaderAtSeeker interface {
	io.ReaderAt
	io.ReadSeeker
}

// PutAll sends all of r via a multipart upload with parts no larger
// than partSize bytes, which must be set to at least 5MB.
// Parts previously uploaded are either reused if their checksum
// and size match the new part, or otherwise overwritten with the
// new content.
// PutAll returns all the parts of m (reused or not).
func (m *Multi) PutAll(r ReaderAtSeeker, partSize int64) ([]Part, error) {
	old, err := m.ListParts()
	if err != nil && !hasCode(err, "NoSuchUpload") {
		return nil, err
	}
	reuse := 0   // Index of next old part to consider reusing.
	current := 1 // Part number of latest good part handled.
	totalSize, err := r.Seek(0, 2)
	if err != nil {
		return nil, err
	}
	first := true // Must send at least one empty part if the file is empty.
	var result []Part
NextSection:
	for offset := int64(0); offset < totalSize || first; offset += partSize {
		first = false
		if offset+partSize > totalSize {
			partSize = totalSize - offset
		}
		section := io.NewSectionReader(r, offset, partSize)
		_, md5hex, md5b64, err := seekerInfo(section)
		if err != nil {
			return nil, err
		}
		for reuse < len(old) && old[reuse].N <= current {
			// Looks like this part was already sent.
			part := &old[reuse]
			etag := `"` + md5hex + `"`
			if part.N == current && part.Size == partSize && part.ETag == etag {
				// Checksum matches. Reuse the old part.
				result = append(result, *part)
				current++
				continue NextSection
			}
			reuse++
		}

		// Part wasn't found or doesn't match. Send it.
		part, err := m.putPart(current, section, partSize, md5b64)
		if err != nil {
			return nil, err
		}
		result = append(result, part)
		current++
	}
	return result, nil
}
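
// Minimal usage sketch (key, content type and ACL are placeholders; an
// *os.File satisfies ReaderAtSeeker): stream a local file as a multipart
// upload with 5MB parts, then complete it.
func examplePutAll(b *Bucket, f ReaderAtSeeker) error {
	m, err := b.InitMulti("backups/dump.tar", "application/x-tar", Private, Options{})
	if err != nil {
		return err
	}
	parts, err := m.PutAll(f, 5*1024*1024)
	if err != nil {
		return err
	}
	return m.Complete(parts)
}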

type completeUpload struct {
	XMLName xml.Name      `xml:"CompleteMultipartUpload"`
	Parts   completeParts `xml:"Part"`
}

type completePart struct {
	PartNumber int
	ETag       string
}

type completeParts []completePart

func (p completeParts) Len() int           { return len(p) }
func (p completeParts) Less(i, j int) bool { return p[i].PartNumber < p[j].PartNumber }
func (p completeParts) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }

// We can't know in advance whether we'll have an Error or a
// CompleteMultipartUploadResult, so this structure is just a placeholder to
// know the name of the XML object.
type completeUploadResp struct {
	XMLName  xml.Name
	InnerXML string `xml:",innerxml"`
}

// Complete assembles the given previously uploaded parts into the
// final object. This operation may take several minutes.
//
// See http://goo.gl/2Z7Tw for details.
func (m *Multi) Complete(parts []Part) error {
	params := map[string][]string{
		"uploadId": {m.UploadId},
	}
	c := completeUpload{}
	for _, p := range parts {
		c.Parts = append(c.Parts, completePart{p.N, p.ETag})
	}
	sort.Sort(c.Parts)
	data, err := xml.Marshal(&c)
	if err != nil {
		return err
	}
	for attempt := attempts.Start(); attempt.Next(); {
		req := &request{
			method:  "POST",
			bucket:  m.Bucket.Name,
			path:    m.Key,
			params:  params,
			payload: bytes.NewReader(data),
		}
		var resp completeUploadResp
		if m.Bucket.Region.Name == "generic" {
			headers := make(http.Header)
			headers.Add("Content-Length", strconv.FormatInt(int64(len(data)), 10))
			req.headers = headers
		}
		err := m.Bucket.S3.query(req, &resp)
		if shouldRetry(err) && attempt.HasNext() {
			continue
		}

		if err != nil {
			return err
		}

		// A 200 status code does not guarantee that there were no errors (see
		// http://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadComplete.html ),
		// so first figure out what kind of XML "object" we are dealing with.

		if resp.XMLName.Local == "Error" {
			// S3.query does the unmarshalling for us, so we can't unmarshal
			// again in a different struct... So we need to duct-tape the
			// original XML back together.
			fullErrorXml := "<Error>" + resp.InnerXML + "</Error>"
			s3err := &Error{}

			if err := xml.Unmarshal([]byte(fullErrorXml), s3err); err != nil {
				return err
			}

			return s3err
		}

		if resp.XMLName.Local == "CompleteMultipartUploadResult" {
			// FIXME: One could probably add a CompleteFull method returning the
			// actual contents of the CompleteMultipartUploadResult object.
			return nil
		}

		return errors.New("Invalid XML struct returned: " + resp.XMLName.Local)
	}
	panic("unreachable")
}

// Abort deletes an unfinished multipart upload and any previously
// uploaded parts for it.
//
// After a multipart upload is aborted, no additional parts can be
// uploaded using it. However, if any part uploads are currently in
// progress, those part uploads might or might not succeed. As a result,
// it might be necessary to abort a given multipart upload multiple
// times in order to completely free all storage consumed by all parts.
//
// NOTE: If the described scenario happens to you, please report back to
// the goamz authors with details. In the future such retrying should be
// handled internally, but it's not clear what happens precisely (Is an
// error returned? Is the issue completely undetectable?).
//
// See http://goo.gl/dnyJw for details.
func (m *Multi) Abort() error {
	params := map[string][]string{
		"uploadId": {m.UploadId},
	}
	for attempt := attempts.Start(); attempt.Next(); {
		req := &request{
			method: "DELETE",
			bucket: m.Bucket.Name,
			path:   m.Key,
			params: params,
		}
		err := m.Bucket.S3.query(req, nil)
		if shouldRetry(err) && attempt.HasNext() {
			continue
		}
		return err
	}
	panic("unreachable")
}
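
// Minimal sketch of the manual part-by-part flow above (key, content type
// and ACL are placeholders): abort the upload if any part fails, so no
// storage stays allocated.
func exampleMultiFlow(b *Bucket, parts []io.ReadSeeker) error {
	m, err := b.InitMulti("videos/clip.mp4", "video/mp4", Private, Options{})
	if err != nil {
		return err
	}
	var done []Part
	for i, r := range parts {
		p, err := m.PutPart(i+1, r) // part numbers start at 1
		if err != nil {
			_ = m.Abort() // best effort; see the NOTE above about repeated aborts
			return err
		}
		done = append(done, p)
	}
	return m.Complete(done)
}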
1305 vendor/github.com/docker/goamz/s3/s3.go generated vendored Normal file
File diff suppressed because it is too large
1023 vendor/github.com/docker/goamz/s3/s3test/server.go generated vendored Normal file
File diff suppressed because it is too large
120 vendor/github.com/docker/goamz/s3/sign.go generated vendored Normal file
@@ -0,0 +1,120 @@
package s3

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/base64"
	"log"
	"sort"
	"strings"

	"github.com/docker/goamz/aws"
)

var b64 = base64.StdEncoding

// ----------------------------------------------------------------------------
// S3 signing (http://goo.gl/G1LrK)

var s3ParamsToSign = map[string]bool{
	"acl":                          true,
	"location":                     true,
	"logging":                      true,
	"notification":                 true,
	"partNumber":                   true,
	"policy":                       true,
	"requestPayment":               true,
	"torrent":                      true,
	"uploadId":                     true,
	"uploads":                      true,
	"versionId":                    true,
	"versioning":                   true,
	"versions":                     true,
	"response-content-type":        true,
	"response-content-language":    true,
	"response-expires":             true,
	"response-cache-control":       true,
	"response-content-disposition": true,
	"response-content-encoding":    true,
	"website":                      true,
	"delete":                       true,
}

// sign computes the legacy S3 signature for the request and stores it
// either in the Signature query parameter (query-string request
// authentication) or in the Authorization header.
func sign(auth aws.Auth, method, canonicalPath string, params, headers map[string][]string) {
	var md5, ctype, date, xamz string
	var xamzDate bool
	var keys, sarray []string
	xheaders := make(map[string]string)
	for k, v := range headers {
		k = strings.ToLower(k)
		switch k {
		case "content-md5":
			md5 = v[0]
		case "content-type":
			ctype = v[0]
		case "date":
			if !xamzDate {
				date = v[0]
			}
		default:
			if strings.HasPrefix(k, "x-amz-") {
				keys = append(keys, k)
				xheaders[k] = strings.Join(v, ",")
				if k == "x-amz-date" {
					xamzDate = true
					date = ""
				}
			}
		}
	}
	if len(keys) > 0 {
		sort.Strings(keys)
		for i := range keys {
			key := keys[i]
			value := xheaders[key]
			sarray = append(sarray, key+":"+value)
		}
		xamz = strings.Join(sarray, "\n") + "\n"
	}

	expires := false
	if v, ok := params["Expires"]; ok {
		// Query string request authentication alternative.
		expires = true
		date = v[0]
		params["AWSAccessKeyId"] = []string{auth.AccessKey}
	}

	sarray = sarray[0:0]
	for k, v := range params {
		if s3ParamsToSign[k] {
			for _, vi := range v {
				if vi == "" {
					sarray = append(sarray, k)
				} else {
					// "When signing you do not encode these values."
					sarray = append(sarray, k+"="+vi)
				}
			}
		}
	}
	if len(sarray) > 0 {
		sort.Strings(sarray)
		canonicalPath = canonicalPath + "?" + strings.Join(sarray, "&")
	}

	payload := method + "\n" + md5 + "\n" + ctype + "\n" + date + "\n" + xamz + canonicalPath
	hash := hmac.New(sha1.New, []byte(auth.SecretKey))
	hash.Write([]byte(payload))
	signature := make([]byte, b64.EncodedLen(hash.Size()))
	b64.Encode(signature, hash.Sum(nil))

	if expires {
		params["Signature"] = []string{string(signature)}
	} else {
		headers["Authorization"] = []string{"AWS " + auth.AccessKey + ":" + string(signature)}
	}
	if debug {
		log.Printf("Signature payload: %q", payload)
		log.Printf("Signature: %q", signature)
	}
}
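
// Minimal sketch of the query-string authentication path above (path and
// expiry timestamp are placeholders): with an "Expires" parameter present,
// sign stores the signature in params rather than in an Authorization
// header, which is the basis for pre-signed S3 URLs.
func exampleQuerySign(auth aws.Auth) map[string][]string {
	params := map[string][]string{"Expires": {"1141889120"}}
	headers := map[string][]string{}
	sign(auth, "GET", "/quotes/nelson", params, headers)
	return params // now also carries AWSAccessKeyId and Signature
}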