Merge pull request #623 from ahmetalpbalkan/azure-vendor

storage/driver/azure: Update vendored Azure SDK
This commit is contained in:
Stephen Day 2015-06-16 17:41:38 -07:00
commit 3ea67df373
27 changed files with 1835 additions and 1710 deletions

View file

@ -6,7 +6,7 @@ RUN apt-get update && \
ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution
ENV GOPATH $DISTRIBUTION_DIR/Godeps/_workspace:$GOPATH ENV GOPATH $DISTRIBUTION_DIR/Godeps/_workspace:$GOPATH
ENV DOCKER_BUILDTAGS include_rados include_azure ENV DOCKER_BUILDTAGS include_rados
WORKDIR $DISTRIBUTION_DIR WORKDIR $DISTRIBUTION_DIR
COPY . $DISTRIBUTION_DIR COPY . $DISTRIBUTION_DIR

5
Godeps/Godeps.json generated
View file

@ -18,9 +18,8 @@
"Rev": "cc210f45dcb9889c2769a274522be2bf70edfb99" "Rev": "cc210f45dcb9889c2769a274522be2bf70edfb99"
}, },
{ {
"ImportPath": "github.com/MSOpenTech/azure-sdk-for-go/storage", "ImportPath": "github.com/Azure/azure-sdk-for-go/storage",
"Comment": "v1.2-43-gd90753b", "Rev": "97d9593768bbbbd316f9c055dfc5f780933cd7fc"
"Rev": "d90753bcad2ed782fcead7392d1e831df29aa2bb"
}, },
{ {
"ImportPath": "github.com/Sirupsen/logrus", "ImportPath": "github.com/Sirupsen/logrus",

View file

@ -0,0 +1,29 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof
# Editor swap files
*.swp
*~
.DS_Store

View file

@ -0,0 +1,19 @@
sudo: false
language: go
before_script:
- go get -u golang.org/x/tools/cmd/vet
- go get -u github.com/golang/lint/golint
go: tip
script:
- test -z "$(gofmt -s -l -w management | tee /dev/stderr)"
- test -z "$(gofmt -s -l -w storage | tee /dev/stderr)"
- go build -v ./...
- go test -v ./storage/... -check.v
- test -z "$(golint ./storage/... | tee /dev/stderr)"
- go vet ./storage/...
- go test -v ./management/...
- test -z "$(golint ./management/... | grep -v 'should have comment' | grep -v 'stutters' | tee /dev/stderr)"
- go vet ./management/...

View file

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View file

@ -0,0 +1,88 @@
# Microsoft Azure SDK for Go
This project provides various Go packages to perform operations
on Microsoft Azure REST APIs.
[![GoDoc](https://godoc.org/github.com/Azure/azure-sdk-for-go?status.svg)](https://godoc.org/github.com/Azure/azure-sdk-for-go) [![Build Status](https://travis-ci.org/Azure/azure-sdk-for-go.svg?branch=master)](https://travis-ci.org/Azure/azure-sdk-for-go)
See list of implemented API clients [here](http://godoc.org/github.com/Azure/azure-sdk-for-go).
> **NOTE:** This repository is under heavy ongoing development and
is likely to break over time. We currently do not have any releases
yet. If you are planning to use the repository, please consider vendoring
the packages in your project and update them when a stable tag is out.
# Installation
go get -d github.com/Azure/azure-sdk-for-go/management
# Usage
Read Godoc of the repository at: http://godoc.org/github.com/Azure/azure-sdk-for-go/
The client currently supports authentication to the Service Management
API with certificates or Azure `.publishSettings` file. You can
download the `.publishSettings` file for your subscriptions
[here](https://manage.windowsazure.com/publishsettings).
### Example: Creating a Linux Virtual Machine
```go
package main
import (
"encoding/base64"
"fmt"
"github.com/Azure/azure-sdk-for-go/management"
"github.com/Azure/azure-sdk-for-go/management/hostedservice"
"github.com/Azure/azure-sdk-for-go/management/virtualmachine"
"github.com/Azure/azure-sdk-for-go/management/vmutils"
)
func main() {
dnsName := "test-vm-from-go"
storageAccount := "mystorageaccount"
location := "West US"
vmSize := "Small"
vmImage := "b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04-LTS-amd64-server-20140724-en-us-30GB"
userName := "testuser"
userPassword := "Test123"
client, err := management.ClientFromPublishSettingsFile("path/to/downloaded.publishsettings", "")
if err != nil {
panic(err)
}
// create hosted service
if err := hostedservice.NewClient(client).CreateHostedService(hostedservice.CreateHostedServiceParameters{
ServiceName: dnsName,
Location: location,
Label: base64.StdEncoding.EncodeToString([]byte(dnsName))}); err != nil {
panic(err)
}
// create virtual machine
role := vmutils.NewVMConfiguration(dnsName, vmSize)
vmutils.ConfigureDeploymentFromPlatformImage(
&role,
vmImage,
fmt.Sprintf("http://%s.blob.core.windows.net/sdktest/%s.vhd", storageAccount, dnsName),
"")
vmutils.ConfigureForLinux(&role, dnsName, userName, userPassword)
vmutils.ConfigureWithPublicSSH(&role)
operationID, err := virtualmachine.NewClient(client).
CreateDeployment(role, dnsName, virtualmachine.CreateDeploymentOptions{})
if err != nil {
panic(err)
}
if err := client.WaitForOperation(operationID, nil); err != nil {
panic(err)
}
}
```
# License
This project is published under [Apache 2.0 License](LICENSE).

View file

@ -2,7 +2,6 @@ package storage
import ( import (
"bytes" "bytes"
"encoding/base64"
"encoding/xml" "encoding/xml"
"errors" "errors"
"fmt" "fmt"
@ -14,8 +13,10 @@ import (
"time" "time"
) )
// BlobStorageClient contains operations for Microsoft Azure Blob Storage
// Service.
type BlobStorageClient struct { type BlobStorageClient struct {
client StorageClient client Client
} }
// A Container is an entry in ContainerListResponse. // A Container is an entry in ContainerListResponse.
@ -25,8 +26,8 @@ type Container struct {
// TODO (ahmetalpbalkan) Metadata // TODO (ahmetalpbalkan) Metadata
} }
// ContainerProperties contains various properties of a // ContainerProperties contains various properties of a container returned from
// container returned from various endpoints like ListContainers. // various endpoints like ListContainers.
type ContainerProperties struct { type ContainerProperties struct {
LastModified string `xml:"Last-Modified"` LastModified string `xml:"Last-Modified"`
Etag string `xml:"Etag"` Etag string `xml:"Etag"`
@ -37,7 +38,9 @@ type ContainerProperties struct {
} }
// ContainerListResponse contains the response fields from // ContainerListResponse contains the response fields from
// ListContainers call. https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx // ListContainers call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
type ContainerListResponse struct { type ContainerListResponse struct {
XMLName xml.Name `xml:"EnumerationResults"` XMLName xml.Name `xml:"EnumerationResults"`
Xmlns string `xml:"xmlns,attr"` Xmlns string `xml:"xmlns,attr"`
@ -66,7 +69,7 @@ type BlobProperties struct {
ContentEncoding string `xml:"Content-Encoding"` ContentEncoding string `xml:"Content-Encoding"`
BlobType BlobType `xml:"x-ms-blob-blob-type"` BlobType BlobType `xml:"x-ms-blob-blob-type"`
SequenceNumber int64 `xml:"x-ms-blob-sequence-number"` SequenceNumber int64 `xml:"x-ms-blob-sequence-number"`
CopyId string `xml:"CopyId"` CopyID string `xml:"CopyId"`
CopyStatus string `xml:"CopyStatus"` CopyStatus string `xml:"CopyStatus"`
CopySource string `xml:"CopySource"` CopySource string `xml:"CopySource"`
CopyProgress string `xml:"CopyProgress"` CopyProgress string `xml:"CopyProgress"`
@ -74,8 +77,9 @@ type BlobProperties struct {
CopyStatusDescription string `xml:"CopyStatusDescription"` CopyStatusDescription string `xml:"CopyStatusDescription"`
} }
// BlobListResponse contains the response fields from // BlobListResponse contains the response fields from ListBlobs call.
// ListBlobs call. https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx //
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
type BlobListResponse struct { type BlobListResponse struct {
XMLName xml.Name `xml:"EnumerationResults"` XMLName xml.Name `xml:"EnumerationResults"`
Xmlns string `xml:"xmlns,attr"` Xmlns string `xml:"xmlns,attr"`
@ -86,8 +90,10 @@ type BlobListResponse struct {
Blobs []Blob `xml:"Blobs>Blob"` Blobs []Blob `xml:"Blobs>Blob"`
} }
// ListContainersParameters defines the set of customizable // ListContainersParameters defines the set of customizable parameters to make a
// parameters to make a List Containers call. https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx // List Containers call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
type ListContainersParameters struct { type ListContainersParameters struct {
Prefix string Prefix string
Marker string Marker string
@ -119,7 +125,9 @@ func (p ListContainersParameters) getParameters() url.Values {
} }
// ListBlobsParameters defines the set of customizable // ListBlobsParameters defines the set of customizable
// parameters to make a List Blobs call. https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx // parameters to make a List Blobs call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
type ListBlobsParameters struct { type ListBlobsParameters struct {
Prefix string Prefix string
Delimiter string Delimiter string
@ -157,6 +165,7 @@ func (p ListBlobsParameters) getParameters() url.Values {
// BlobType defines the type of the Azure Blob. // BlobType defines the type of the Azure Blob.
type BlobType string type BlobType string
// Types of page blobs
const ( const (
BlobTypeBlock BlobType = "BlockBlob" BlobTypeBlock BlobType = "BlockBlob"
BlobTypePage BlobType = "PageBlob" BlobTypePage BlobType = "PageBlob"
@ -166,6 +175,7 @@ const (
// done on the page blob. // done on the page blob.
type PageWriteType string type PageWriteType string
// Types of operations on page blobs
const ( const (
PageWriteTypeUpdate PageWriteType = "update" PageWriteTypeUpdate PageWriteType = "update"
PageWriteTypeClear PageWriteType = "clear" PageWriteTypeClear PageWriteType = "clear"
@ -178,29 +188,35 @@ const (
blobCopyStatusFailed = "failed" blobCopyStatusFailed = "failed"
) )
// BlockListType is used to filter out types of blocks // BlockListType is used to filter out types of blocks in a Get Blocks List call
// in a Get Blocks List call for a block blob. See // for a block blob.
// https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx //
// for all block types. // See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx for all
// block types.
type BlockListType string type BlockListType string
// Filters for listing blocks in block blobs
const ( const (
BlockListTypeAll BlockListType = "all" BlockListTypeAll BlockListType = "all"
BlockListTypeCommitted BlockListType = "committed" BlockListTypeCommitted BlockListType = "committed"
BlockListTypeUncommitted BlockListType = "uncommitted" BlockListTypeUncommitted BlockListType = "uncommitted"
) )
// ContainerAccessType defines the access level to the container // ContainerAccessType defines the access level to the container from a public
// from a public request. See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx // request.
// and "x-ms-blob-public-access" header. //
// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx and "x-ms-
// blob-public-access" header.
type ContainerAccessType string type ContainerAccessType string
// Access options for containers
const ( const (
ContainerAccessTypePrivate ContainerAccessType = "" ContainerAccessTypePrivate ContainerAccessType = ""
ContainerAccessTypeBlob ContainerAccessType = "blob" ContainerAccessTypeBlob ContainerAccessType = "blob"
ContainerAccessTypeContainer ContainerAccessType = "container" ContainerAccessTypeContainer ContainerAccessType = "container"
) )
// Maximum sizes (per REST API) for various concepts
const ( const (
MaxBlobBlockSize = 4 * 1024 * 1024 MaxBlobBlockSize = 4 * 1024 * 1024
MaxBlobPageSize = 4 * 1024 * 1024 MaxBlobPageSize = 4 * 1024 * 1024
@ -210,6 +226,7 @@ const (
// be in. // be in.
type BlockStatus string type BlockStatus string
// List of statuses that can be used to refer to a block in a block list
const ( const (
BlockStatusUncommitted BlockStatus = "Uncommitted" BlockStatusUncommitted BlockStatus = "Uncommitted"
BlockStatusCommitted BlockStatus = "Committed" BlockStatusCommitted BlockStatus = "Committed"
@ -219,12 +236,13 @@ const (
// Block is used to create Block entities for Put Block List // Block is used to create Block entities for Put Block List
// call. // call.
type Block struct { type Block struct {
Id string ID string
Status BlockStatus Status BlockStatus
} }
// BlockListResponse contains the response fields from // BlockListResponse contains the response fields from Get Block List call.
// Get Block List call. https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx //
// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx
type BlockListResponse struct { type BlockListResponse struct {
XMLName xml.Name `xml:"BlockList"` XMLName xml.Name `xml:"BlockList"`
CommittedBlocks []BlockResponse `xml:"CommittedBlocks>Block"` CommittedBlocks []BlockResponse `xml:"CommittedBlocks>Block"`
@ -239,31 +257,32 @@ type BlockResponse struct {
} }
// GetPageRangesResponse contains the reponse fields from // GetPageRangesResponse contains the reponse fields from
// Get Page Ranges call. https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx // Get Page Ranges call.
//
// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
type GetPageRangesResponse struct { type GetPageRangesResponse struct {
XMLName xml.Name `xml:"PageList"` XMLName xml.Name `xml:"PageList"`
PageList []PageRange `xml:"PageRange"` PageList []PageRange `xml:"PageRange"`
} }
// PageRange contains information about a page of a page blob from // PageRange contains information about a page of a page blob from
// Get Pages Range call. https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx // Get Pages Range call.
//
// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
type PageRange struct { type PageRange struct {
Start int64 `xml:"Start"` Start int64 `xml:"Start"`
End int64 `xml:"End"` End int64 `xml:"End"`
} }
var ( var (
ErrNotCreated = errors.New("storage: operation has returned a successful error code other than 201 Created.")
ErrNotAccepted = errors.New("storage: operation has returned a successful error code other than 202 Accepted.")
errBlobCopyAborted = errors.New("storage: blob copy is aborted") errBlobCopyAborted = errors.New("storage: blob copy is aborted")
errBlobCopyIdMismatch = errors.New("storage: blob copy id is a mismatch") errBlobCopyIDMismatch = errors.New("storage: blob copy id is a mismatch")
) )
const errUnexpectedStatus = "storage: was expecting status code: %d, got: %d"
// ListContainers returns the list of containers in a storage account along with // ListContainers returns the list of containers in a storage account along with
// pagination token and other response details. See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx // pagination token and other response details.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179352.aspx
func (b BlobStorageClient) ListContainers(params ListContainersParameters) (ContainerListResponse, error) { func (b BlobStorageClient) ListContainers(params ListContainersParameters) (ContainerListResponse, error) {
q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}}) q := mergeParams(params.getParameters(), url.Values{"comp": {"list"}})
uri := b.client.getEndpoint(blobServiceName, "", q) uri := b.client.getEndpoint(blobServiceName, "", q)
@ -274,31 +293,34 @@ func (b BlobStorageClient) ListContainers(params ListContainersParameters) (Cont
if err != nil { if err != nil {
return out, err return out, err
} }
defer resp.body.Close()
err = xmlUnmarshal(resp.body, &out) err = xmlUnmarshal(resp.body, &out)
return out, err return out, err
} }
// CreateContainer creates a blob container within the storage account // CreateContainer creates a blob container within the storage account
// with given name and access level. See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx // with given name and access level. Returns error if container already exists.
// Returns error if container already exists. //
// See https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx
func (b BlobStorageClient) CreateContainer(name string, access ContainerAccessType) error { func (b BlobStorageClient) CreateContainer(name string, access ContainerAccessType) error {
resp, err := b.createContainer(name, access) resp, err := b.createContainer(name, access)
if err != nil { if err != nil {
return err return err
} }
if resp.statusCode != http.StatusCreated { defer resp.body.Close()
return ErrNotCreated return checkRespCode(resp.statusCode, []int{http.StatusCreated})
}
return nil
} }
// CreateContainerIfNotExists creates a blob container if it does not exist. Returns // CreateContainerIfNotExists creates a blob container if it does not exist. Returns
// true if container is newly created or false if container already exists. // true if container is newly created or false if container already exists.
func (b BlobStorageClient) CreateContainerIfNotExists(name string, access ContainerAccessType) (bool, error) { func (b BlobStorageClient) CreateContainerIfNotExists(name string, access ContainerAccessType) (bool, error) {
resp, err := b.createContainer(name, access) resp, err := b.createContainer(name, access)
if resp != nil && (resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict) { if resp != nil {
return resp.statusCode == http.StatusCreated, nil defer resp.body.Close()
if resp.statusCode == http.StatusCreated || resp.statusCode == http.StatusConflict {
return resp.statusCode == http.StatusCreated, nil
}
} }
return false, err return false, err
} }
@ -323,34 +345,41 @@ func (b BlobStorageClient) ContainerExists(name string) (bool, error) {
headers := b.client.getStandardHeaders() headers := b.client.getStandardHeaders()
resp, err := b.client.exec(verb, uri, headers, nil) resp, err := b.client.exec(verb, uri, headers, nil)
if resp != nil && (resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound) { if resp != nil {
return resp.statusCode == http.StatusOK, nil defer resp.body.Close()
if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
return resp.statusCode == http.StatusOK, nil
}
} }
return false, err return false, err
} }
// DeleteContainer deletes the container with given name on the storage // DeleteContainer deletes the container with given name on the storage
// account. See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx // account. If the container does not exist returns error.
// If the container does not exist returns error. //
// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx
func (b BlobStorageClient) DeleteContainer(name string) error { func (b BlobStorageClient) DeleteContainer(name string) error {
resp, err := b.deleteContainer(name) resp, err := b.deleteContainer(name)
if err != nil { if err != nil {
return err return err
} }
if resp.statusCode != http.StatusAccepted { defer resp.body.Close()
return ErrNotAccepted return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
}
return nil
} }
// DeleteContainer deletes the container with given name on the storage // DeleteContainerIfExists deletes the container with given name on the storage
// account if it exists. See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx // account if it exists. Returns true if container is deleted with this call, or
// Returns true if container is deleted with this call, or false // false if the container did not exist at the time of the Delete Container
// if the container did not exist at the time of the Delete Container operation. // operation.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179408.aspx
func (b BlobStorageClient) DeleteContainerIfExists(name string) (bool, error) { func (b BlobStorageClient) DeleteContainerIfExists(name string) (bool, error) {
resp, err := b.deleteContainer(name) resp, err := b.deleteContainer(name)
if resp != nil && (resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound) { if resp != nil {
return resp.statusCode == http.StatusAccepted, nil defer resp.body.Close()
if resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound {
return resp.statusCode == http.StatusAccepted, nil
}
} }
return false, err return false, err
} }
@ -365,6 +394,7 @@ func (b BlobStorageClient) deleteContainer(name string) (*storageResponse, error
// ListBlobs returns an object that contains list of blobs in the container, // ListBlobs returns an object that contains list of blobs in the container,
// pagination token and other information in the response of List Blobs call. // pagination token and other information in the response of List Blobs call.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx // See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx
func (b BlobStorageClient) ListBlobs(container string, params ListBlobsParameters) (BlobListResponse, error) { func (b BlobStorageClient) ListBlobs(container string, params ListBlobsParameters) (BlobListResponse, error) {
q := mergeParams(params.getParameters(), url.Values{ q := mergeParams(params.getParameters(), url.Values{
@ -378,60 +408,68 @@ func (b BlobStorageClient) ListBlobs(container string, params ListBlobsParameter
if err != nil { if err != nil {
return out, err return out, err
} }
defer resp.body.Close()
err = xmlUnmarshal(resp.body, &out) err = xmlUnmarshal(resp.body, &out)
return out, err return out, err
} }
// BlobExists returns true if a blob with given name exists on the // BlobExists returns true if a blob with given name exists on the specified
// specified container of the storage account. // container of the storage account.
func (b BlobStorageClient) BlobExists(container, name string) (bool, error) { func (b BlobStorageClient) BlobExists(container, name string) (bool, error) {
verb := "HEAD" verb := "HEAD"
uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
headers := b.client.getStandardHeaders() headers := b.client.getStandardHeaders()
resp, err := b.client.exec(verb, uri, headers, nil) resp, err := b.client.exec(verb, uri, headers, nil)
if resp != nil && (resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound) { if resp != nil {
return resp.statusCode == http.StatusOK, nil defer resp.body.Close()
if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
return resp.statusCode == http.StatusOK, nil
}
} }
return false, err return false, err
} }
// GetBlobUrl gets the canonical URL to the blob with the specified // GetBlobURL gets the canonical URL to the blob with the specified name in the
// name in the specified container. This method does not create a // specified container. This method does not create a publicly accessible URL if
// publicly accessible URL if the blob or container is private and this // the blob or container is private and this method does not check if the blob
// method does not check if the blob exists. // exists.
func (b BlobStorageClient) GetBlobUrl(container, name string) string { func (b BlobStorageClient) GetBlobURL(container, name string) string {
if container == "" { if container == "" {
container = "$root" container = "$root"
} }
return b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) return b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
} }
// GetBlob downloads a blob to a stream. See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx // GetBlob returns a stream to read the blob. Caller must call Close() the
// reader to close on the underlying connection.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx
func (b BlobStorageClient) GetBlob(container, name string) (io.ReadCloser, error) { func (b BlobStorageClient) GetBlob(container, name string) (io.ReadCloser, error) {
resp, err := b.getBlobRange(container, name, "") resp, err := b.getBlobRange(container, name, "")
if err != nil { if err != nil {
return nil, err return nil, err
} }
if resp.statusCode != http.StatusOK { if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
return nil, fmt.Errorf(errUnexpectedStatus, http.StatusOK, resp.statusCode) return nil, err
} }
return resp.body, nil return resp.body, nil
} }
// GetBlobRange reads the specified range of a blob to a stream. // GetBlobRange reads the specified range of a blob to a stream. The bytesRange
// The bytesRange string must be in a format like "0-", "10-100" // string must be in a format like "0-", "10-100" as defined in HTTP 1.1 spec.
// as defined in HTTP 1.1 spec. See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx //
// See https://msdn.microsoft.com/en-us/library/azure/dd179440.aspx
func (b BlobStorageClient) GetBlobRange(container, name, bytesRange string) (io.ReadCloser, error) { func (b BlobStorageClient) GetBlobRange(container, name, bytesRange string) (io.ReadCloser, error) {
resp, err := b.getBlobRange(container, name, bytesRange) resp, err := b.getBlobRange(container, name, bytesRange)
if err != nil { if err != nil {
return nil, err return nil, err
} }
if resp.statusCode != http.StatusPartialContent { if err := checkRespCode(resp.statusCode, []int{http.StatusPartialContent}); err != nil {
return nil, fmt.Errorf(errUnexpectedStatus, http.StatusPartialContent, resp.statusCode) return nil, err
} }
return resp.body, nil return resp.body, nil
} }
@ -462,9 +500,10 @@ func (b BlobStorageClient) GetBlobProperties(container, name string) (*BlobPrope
if err != nil { if err != nil {
return nil, err return nil, err
} }
defer resp.body.Close()
if resp.statusCode != http.StatusOK { if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
return nil, fmt.Errorf(errUnexpectedStatus, http.StatusOK, resp.statusCode) return nil, err
} }
var contentLength int64 var contentLength int64
@ -494,7 +533,7 @@ func (b BlobStorageClient) GetBlobProperties(container, name string) (*BlobPrope
SequenceNumber: sequenceNum, SequenceNumber: sequenceNum,
CopyCompletionTime: resp.headers.Get("x-ms-copy-completion-time"), CopyCompletionTime: resp.headers.Get("x-ms-copy-completion-time"),
CopyStatusDescription: resp.headers.Get("x-ms-copy-status-description"), CopyStatusDescription: resp.headers.Get("x-ms-copy-status-description"),
CopyId: resp.headers.Get("x-ms-copy-id"), CopyID: resp.headers.Get("x-ms-copy-id"),
CopyProgress: resp.headers.Get("x-ms-copy-progress"), CopyProgress: resp.headers.Get("x-ms-copy-progress"),
CopySource: resp.headers.Get("x-ms-copy-source"), CopySource: resp.headers.Get("x-ms-copy-source"),
CopyStatus: resp.headers.Get("x-ms-copy-status"), CopyStatus: resp.headers.Get("x-ms-copy-status"),
@ -503,6 +542,7 @@ func (b BlobStorageClient) GetBlobProperties(container, name string) (*BlobPrope
} }
// CreateBlockBlob initializes an empty block blob with no blocks. // CreateBlockBlob initializes an empty block blob with no blocks.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx // See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx
func (b BlobStorageClient) CreateBlockBlob(container, name string) error { func (b BlobStorageClient) CreateBlockBlob(container, name string) error {
path := fmt.Sprintf("%s/%s", container, name) path := fmt.Sprintf("%s/%s", container, name)
@ -515,96 +555,25 @@ func (b BlobStorageClient) CreateBlockBlob(container, name string) error {
if err != nil { if err != nil {
return err return err
} }
if resp.statusCode != http.StatusCreated { defer resp.body.Close()
return ErrNotCreated return checkRespCode(resp.statusCode, []int{http.StatusCreated})
}
return nil
}
// PutBlockBlob uploads given stream into a block blob by splitting
// data stream into chunks and uploading as blocks. Commits the block
// list at the end. This is a helper method built on top of PutBlock
// and PutBlockList methods with sequential block ID counting logic.
func (b BlobStorageClient) PutBlockBlob(container, name string, blob io.Reader) error { // TODO (ahmetalpbalkan) consider ReadCloser and closing
return b.putBlockBlob(container, name, blob, MaxBlobBlockSize)
}
func (b BlobStorageClient) putBlockBlob(container, name string, blob io.Reader, chunkSize int) error {
if chunkSize <= 0 || chunkSize > MaxBlobBlockSize {
chunkSize = MaxBlobBlockSize
}
chunk := make([]byte, chunkSize)
n, err := blob.Read(chunk)
if err != nil && err != io.EOF {
return err
}
if err == io.EOF {
// Fits into one block
return b.putSingleBlockBlob(container, name, chunk[:n])
} else {
// Does not fit into one block. Upload block by block then commit the block list
blockList := []Block{}
// Put blocks
for blockNum := 0; ; blockNum++ {
id := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%011d", blockNum)))
data := chunk[:n]
err = b.PutBlock(container, name, id, data)
if err != nil {
return err
}
blockList = append(blockList, Block{id, BlockStatusLatest})
// Read next block
n, err = blob.Read(chunk)
if err != nil && err != io.EOF {
return err
}
if err == io.EOF {
break
}
}
// Commit block list
return b.PutBlockList(container, name, blockList)
}
}
func (b BlobStorageClient) putSingleBlockBlob(container, name string, chunk []byte) error {
if len(chunk) > MaxBlobBlockSize {
return fmt.Errorf("storage: provided chunk (%d bytes) cannot fit into single-block blob (max %d bytes)", len(chunk), MaxBlobBlockSize)
}
uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
headers := b.client.getStandardHeaders()
headers["x-ms-blob-type"] = string(BlobTypeBlock)
headers["Content-Length"] = fmt.Sprintf("%v", len(chunk))
resp, err := b.client.exec("PUT", uri, headers, bytes.NewReader(chunk))
if err != nil {
return err
}
if resp.statusCode != http.StatusCreated {
return ErrNotCreated
}
return nil
} }
// PutBlock saves the given data chunk to the specified block blob with // PutBlock saves the given data chunk to the specified block blob with
// given ID. See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx // given ID.
func (b BlobStorageClient) PutBlock(container, name, blockId string, chunk []byte) error { //
return b.PutBlockWithLength(container, name, blockId, uint64(len(chunk)), bytes.NewReader(chunk)) // See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx
func (b BlobStorageClient) PutBlock(container, name, blockID string, chunk []byte) error {
return b.PutBlockWithLength(container, name, blockID, uint64(len(chunk)), bytes.NewReader(chunk))
} }
// PutBlockWithLength saves the given data stream of exactly specified size to the block blob // PutBlockWithLength saves the given data stream of exactly specified size to
// with given ID. See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx // the block blob with given ID. It is an alternative to PutBlocks where data
// It is an alternative to PutBlocks where data comes as stream but the length is // comes as stream but the length is known in advance.
// known in advance. //
func (b BlobStorageClient) PutBlockWithLength(container, name, blockId string, size uint64, blob io.Reader) error { // See https://msdn.microsoft.com/en-us/library/azure/dd135726.aspx
uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"block"}, "blockid": {blockId}}) func (b BlobStorageClient) PutBlockWithLength(container, name, blockID string, size uint64, blob io.Reader) error {
uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"block"}, "blockid": {blockID}})
headers := b.client.getStandardHeaders() headers := b.client.getStandardHeaders()
headers["x-ms-blob-type"] = string(BlobTypeBlock) headers["x-ms-blob-type"] = string(BlobTypeBlock)
headers["Content-Length"] = fmt.Sprintf("%v", size) headers["Content-Length"] = fmt.Sprintf("%v", size)
@ -613,34 +582,31 @@ func (b BlobStorageClient) PutBlockWithLength(container, name, blockId string, s
if err != nil { if err != nil {
return err return err
} }
if resp.statusCode != http.StatusCreated { defer resp.body.Close()
return ErrNotCreated return checkRespCode(resp.statusCode, []int{http.StatusCreated})
}
return nil
} }
// PutBlockList saves list of blocks to the specified block blob. See // PutBlockList saves list of blocks to the specified block blob.
// https://msdn.microsoft.com/en-us/library/azure/dd179467.aspx //
// See https://msdn.microsoft.com/en-us/library/azure/dd179467.aspx
func (b BlobStorageClient) PutBlockList(container, name string, blocks []Block) error { func (b BlobStorageClient) PutBlockList(container, name string, blocks []Block) error {
blockListXml := prepareBlockListRequest(blocks) blockListXML := prepareBlockListRequest(blocks)
uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"blocklist"}}) uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{"comp": {"blocklist"}})
headers := b.client.getStandardHeaders() headers := b.client.getStandardHeaders()
headers["Content-Length"] = fmt.Sprintf("%v", len(blockListXml)) headers["Content-Length"] = fmt.Sprintf("%v", len(blockListXML))
resp, err := b.client.exec("PUT", uri, headers, strings.NewReader(blockListXml)) resp, err := b.client.exec("PUT", uri, headers, strings.NewReader(blockListXML))
if err != nil { if err != nil {
return err return err
} }
if resp.statusCode != http.StatusCreated { defer resp.body.Close()
return ErrNotCreated return checkRespCode(resp.statusCode, []int{http.StatusCreated})
}
return nil
} }
// GetBlockList retrieves list of blocks in the specified block blob. See // GetBlockList retrieves list of blocks in the specified block blob.
// https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx //
// See https://msdn.microsoft.com/en-us/library/azure/dd179400.aspx
func (b BlobStorageClient) GetBlockList(container, name string, blockType BlockListType) (BlockListResponse, error) { func (b BlobStorageClient) GetBlockList(container, name string, blockType BlockListType) (BlockListResponse, error) {
params := url.Values{"comp": {"blocklist"}, "blocklisttype": {string(blockType)}} params := url.Values{"comp": {"blocklist"}, "blocklisttype": {string(blockType)}}
uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params)
@ -651,6 +617,7 @@ func (b BlobStorageClient) GetBlockList(container, name string, blockType BlockL
if err != nil { if err != nil {
return out, err return out, err
} }
defer resp.body.Close()
err = xmlUnmarshal(resp.body, &out) err = xmlUnmarshal(resp.body, &out)
return out, err return out, err
@ -659,6 +626,7 @@ func (b BlobStorageClient) GetBlockList(container, name string, blockType BlockL
// PutPageBlob initializes an empty page blob with specified name and maximum // PutPageBlob initializes an empty page blob with specified name and maximum
// size in bytes (size must be aligned to a 512-byte boundary). A page blob must // size in bytes (size must be aligned to a 512-byte boundary). A page blob must
// be created using this method before writing pages. // be created using this method before writing pages.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx // See https://msdn.microsoft.com/en-us/library/azure/dd179451.aspx
func (b BlobStorageClient) PutPageBlob(container, name string, size int64) error { func (b BlobStorageClient) PutPageBlob(container, name string, size int64) error {
path := fmt.Sprintf("%s/%s", container, name) path := fmt.Sprintf("%s/%s", container, name)
@ -672,15 +640,15 @@ func (b BlobStorageClient) PutPageBlob(container, name string, size int64) error
if err != nil { if err != nil {
return err return err
} }
if resp.statusCode != http.StatusCreated { defer resp.body.Close()
return ErrNotCreated
} return checkRespCode(resp.statusCode, []int{http.StatusCreated})
return nil
} }
// PutPage writes a range of pages to a page blob or clears the given range. // PutPage writes a range of pages to a page blob or clears the given range.
// In case of 'clear' writes, given chunk is discarded. Ranges must be aligned // In case of 'clear' writes, given chunk is discarded. Ranges must be aligned
// with 512-byte boundaries and chunk must be of size multiplies by 512. // with 512-byte boundaries and chunk must be of size multiplies by 512.
//
// See https://msdn.microsoft.com/en-us/library/ee691975.aspx // See https://msdn.microsoft.com/en-us/library/ee691975.aspx
func (b BlobStorageClient) PutPage(container, name string, startByte, endByte int64, writeType PageWriteType, chunk []byte) error { func (b BlobStorageClient) PutPage(container, name string, startByte, endByte int64, writeType PageWriteType, chunk []byte) error {
path := fmt.Sprintf("%s/%s", container, name) path := fmt.Sprintf("%s/%s", container, name)
@ -705,13 +673,13 @@ func (b BlobStorageClient) PutPage(container, name string, startByte, endByte in
if err != nil { if err != nil {
return err return err
} }
if resp.statusCode != http.StatusCreated { defer resp.body.Close()
return ErrNotCreated
} return checkRespCode(resp.statusCode, []int{http.StatusCreated})
return nil
} }
// GetPageRanges returns the list of valid page ranges for a page blob. // GetPageRanges returns the list of valid page ranges for a page blob.
//
// See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx // See https://msdn.microsoft.com/en-us/library/azure/ee691973.aspx
func (b BlobStorageClient) GetPageRanges(container, name string) (GetPageRangesResponse, error) { func (b BlobStorageClient) GetPageRanges(container, name string) (GetPageRangesResponse, error) {
path := fmt.Sprintf("%s/%s", container, name) path := fmt.Sprintf("%s/%s", container, name)
@ -723,26 +691,28 @@ func (b BlobStorageClient) GetPageRanges(container, name string) (GetPageRangesR
if err != nil { if err != nil {
return out, err return out, err
} }
defer resp.body.Close()
if resp.statusCode != http.StatusOK { if err := checkRespCode(resp.statusCode, []int{http.StatusOK}); err != nil {
return out, fmt.Errorf(errUnexpectedStatus, http.StatusOK, resp.statusCode) return out, err
} }
err = xmlUnmarshal(resp.body, &out) err = xmlUnmarshal(resp.body, &out)
return out, err return out, err
} }
// CopyBlob starts a blob copy operation and waits for the operation to complete. // CopyBlob starts a blob copy operation and waits for the operation to
// sourceBlob parameter must be a canonical URL to the blob (can be obtained using // complete. sourceBlob parameter must be a canonical URL to the blob (can be
// GetBlobURL method.) There is no SLA on blob copy and therefore this helper // obtained using GetBlobURL method.) There is no SLA on blob copy and therefore
// method works faster on smaller files. See https://msdn.microsoft.com/en-us/library/azure/dd894037.aspx // this helper method works faster on smaller files.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd894037.aspx
func (b BlobStorageClient) CopyBlob(container, name, sourceBlob string) error { func (b BlobStorageClient) CopyBlob(container, name, sourceBlob string) error {
copyId, err := b.startBlobCopy(container, name, sourceBlob) copyID, err := b.startBlobCopy(container, name, sourceBlob)
if err != nil { if err != nil {
return err return err
} }
return b.waitForBlobCopy(container, name, copyId) return b.waitForBlobCopy(container, name, copyID)
} }
func (b BlobStorageClient) startBlobCopy(container, name, sourceBlob string) (string, error) { func (b BlobStorageClient) startBlobCopy(container, name, sourceBlob string) (string, error) {
@ -756,26 +726,28 @@ func (b BlobStorageClient) startBlobCopy(container, name, sourceBlob string) (st
if err != nil { if err != nil {
return "", err return "", err
} }
if resp.statusCode != http.StatusAccepted && resp.statusCode != http.StatusCreated { defer resp.body.Close()
return "", fmt.Errorf(errUnexpectedStatus, []int{http.StatusAccepted, http.StatusCreated}, resp.statusCode)
if err := checkRespCode(resp.statusCode, []int{http.StatusAccepted, http.StatusCreated}); err != nil {
return "", err
} }
copyId := resp.headers.Get("x-ms-copy-id") copyID := resp.headers.Get("x-ms-copy-id")
if copyId == "" { if copyID == "" {
return "", errors.New("Got empty copy id header") return "", errors.New("Got empty copy id header")
} }
return copyId, nil return copyID, nil
} }
func (b BlobStorageClient) waitForBlobCopy(container, name, copyId string) error { func (b BlobStorageClient) waitForBlobCopy(container, name, copyID string) error {
for { for {
props, err := b.GetBlobProperties(container, name) props, err := b.GetBlobProperties(container, name)
if err != nil { if err != nil {
return err return err
} }
if props.CopyId != copyId { if props.CopyID != copyID {
return errBlobCopyIdMismatch return errBlobCopyIDMismatch
} }
switch props.CopyStatus { switch props.CopyStatus {
@ -786,7 +758,7 @@ func (b BlobStorageClient) waitForBlobCopy(container, name, copyId string) error
case blobCopyStatusAborted: case blobCopyStatusAborted:
return errBlobCopyAborted return errBlobCopyAborted
case blobCopyStatusFailed: case blobCopyStatusFailed:
return fmt.Errorf("storage: blob copy failed. Id=%s Description=%s", props.CopyId, props.CopyStatusDescription) return fmt.Errorf("storage: blob copy failed. Id=%s Description=%s", props.CopyID, props.CopyStatusDescription)
default: default:
return fmt.Errorf("storage: unhandled blob copy status: '%s'", props.CopyStatus) return fmt.Errorf("storage: unhandled blob copy status: '%s'", props.CopyStatus)
} }
@ -801,20 +773,20 @@ func (b BlobStorageClient) DeleteBlob(container, name string) error {
if err != nil { if err != nil {
return err return err
} }
if resp.statusCode != http.StatusAccepted { defer resp.body.Close()
return ErrNotAccepted return checkRespCode(resp.statusCode, []int{http.StatusAccepted})
}
return nil
} }
// DeleteBlobIfExists deletes the given blob from the specified container // DeleteBlobIfExists deletes the given blob from the specified container If the
// If the blob is deleted with this call, returns true. Otherwise returns // blob is deleted with this call, returns true. Otherwise returns false.
// false. See https://msdn.microsoft.com/en-us/library/azure/dd179413.aspx //
// See https://msdn.microsoft.com/en-us/library/azure/dd179413.aspx
func (b BlobStorageClient) DeleteBlobIfExists(container, name string) (bool, error) { func (b BlobStorageClient) DeleteBlobIfExists(container, name string) (bool, error) {
resp, err := b.deleteBlob(container, name) resp, err := b.deleteBlob(container, name)
if resp != nil && (resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound) { if resp != nil && (resp.statusCode == http.StatusAccepted || resp.statusCode == http.StatusNotFound) {
return resp.statusCode == http.StatusAccepted, nil return resp.statusCode == http.StatusAccepted, nil
} }
defer resp.body.Close()
return false, err return false, err
} }
@ -831,19 +803,22 @@ func pathForContainer(name string) string {
return fmt.Sprintf("/%s", name) return fmt.Sprintf("/%s", name)
} }
// helper method to construct the path to a blob given its container and blob name // helper method to construct the path to a blob given its container and blob
// name
func pathForBlob(container, name string) string { func pathForBlob(container, name string) string {
return fmt.Sprintf("/%s/%s", container, name) return fmt.Sprintf("/%s/%s", container, name)
} }
// GetBlobSASURI creates an URL to the specified blob which contains the Shared Access Signature // GetBlobSASURI creates an URL to the specified blob which contains the Shared
// with specified permissions and expiration time. See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx // Access Signature with specified permissions and expiration time.
//
// See https://msdn.microsoft.com/en-us/library/azure/ee395415.aspx
func (b BlobStorageClient) GetBlobSASURI(container, name string, expiry time.Time, permissions string) (string, error) { func (b BlobStorageClient) GetBlobSASURI(container, name string, expiry time.Time, permissions string) (string, error) {
var ( var (
signedPermissions = permissions signedPermissions = permissions
blobUrl = b.GetBlobUrl(container, name) blobURL = b.GetBlobURL(container, name)
) )
canonicalizedResource, err := b.client.buildCanonicalizedResource(blobUrl) canonicalizedResource, err := b.client.buildCanonicalizedResource(blobURL)
if err != nil { if err != nil {
return "", err return "", err
} }
@ -864,12 +839,12 @@ func (b BlobStorageClient) GetBlobSASURI(container, name string, expiry time.Tim
"sig": {sig}, "sig": {sig},
} }
sasUrl, err := url.Parse(blobUrl) sasURL, err := url.Parse(blobURL)
if err != nil { if err != nil {
return "", err return "", err
} }
sasUrl.RawQuery = sasParams.Encode() sasURL.RawQuery = sasParams.Encode()
return sasUrl.String(), nil return sasURL.String(), nil
} }
func blobSASStringToSign(signedVersion, canonicalizedResource, signedExpiry, signedPermissions string) (string, error) { func blobSASStringToSign(signedVersion, canonicalizedResource, signedExpiry, signedPermissions string) (string, error) {
@ -878,7 +853,6 @@ func blobSASStringToSign(signedVersion, canonicalizedResource, signedExpiry, sig
// reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx // reference: http://msdn.microsoft.com/en-us/library/azure/dn140255.aspx
if signedVersion >= "2013-08-15" { if signedVersion >= "2013-08-15" {
return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion, rscc, rscd, rsce, rscl, rsct), nil return fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", signedPermissions, signedStart, signedExpiry, canonicalizedResource, signedIdentifier, signedVersion, rscc, rscd, rsce, rscl, rsct), nil
} else {
return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15")
} }
return "", errors.New("storage: not implemented SAS for versions earlier than 2013-08-15")
} }

View file

@ -0,0 +1,625 @@
package storage
import (
"bytes"
"crypto/rand"
"encoding/base64"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"sort"
"sync"
"testing"
"time"
chk "gopkg.in/check.v1"
)
// StorageBlobSuite groups the blob storage integration tests; it is run by
// gocheck via the Suite registration below.
type StorageBlobSuite struct{}

var _ = chk.Suite(&StorageBlobSuite{})

// testContainerPrefix namespaces containers created by these tests so that
// stale ones can be found and cleaned up (see deleteTestContainers usage).
const testContainerPrefix = "zzzztest-"
// getBlobClient returns a BlobStorageClient backed by the shared test account.
func getBlobClient(c *chk.C) BlobStorageClient {
	api := getBasicClient(c)
	return api.GetBlobService()
}
// Test_pathForContainer verifies the resource path built for a container name.
func (s *StorageBlobSuite) Test_pathForContainer(c *chk.C) {
	got := pathForContainer("foo")
	c.Assert(got, chk.Equals, "/foo")
}
// Test_pathForBlob verifies the resource path built for a container/blob pair.
func (s *StorageBlobSuite) Test_pathForBlob(c *chk.C) {
	got := pathForBlob("foo", "blob")
	c.Assert(got, chk.Equals, "/foo/blob")
}
// Test_blobSASStringToSign checks the SAS string-to-sign: versions before
// 2013-08-15 must be rejected, and the supported version must produce the
// documented newline-separated layout.
func (s *StorageBlobSuite) Test_blobSASStringToSign(c *chk.C) {
	// Pre-2013-08-15 versions are not implemented and must error.
	_, err := blobSASStringToSign("2012-02-12", "CS", "SE", "SP")
	c.Assert(err, chk.NotNil)

	signed, err := blobSASStringToSign("2013-08-15", "CS", "SE", "SP")
	c.Assert(err, chk.IsNil)
	c.Assert(signed, chk.Equals, "SP\n\nSE\nCS\n\n2013-08-15\n\n\n\n\n")
}
// TestGetBlobSASURI validates the SAS URL generated for a fixed account,
// base64 key ("bar"), API version and zero expiry against a precomputed
// signature.
func (s *StorageBlobSuite) TestGetBlobSASURI(c *chk.C) {
	api, err := NewClient("foo", "YmFy", DefaultBaseURL, "2013-08-15", true)
	c.Assert(err, chk.IsNil)
	cli := api.GetBlobService()

	want := url.URL{
		Scheme: "https",
		Host:   "foo.blob.core.windows.net",
		Path:   "container/name",
		RawQuery: url.Values{
			"sv":  {"2013-08-15"},
			"sig": {"/OXG7rWh08jYwtU03GzJM0DHZtidRGpC6g69rSGm3I0="},
			"sr":  {"b"},
			"sp":  {"r"},
			"se":  {"0001-01-01T00:00:00Z"},
		}.Encode()}

	sas, err := cli.GetBlobSASURI("container", "name", time.Time{}, "r")
	c.Assert(err, chk.IsNil)

	got, err := url.Parse(sas)
	c.Assert(err, chk.IsNil)
	c.Assert(want.String(), chk.Equals, got.String())
	c.Assert(want.Query(), chk.DeepEquals, got.Query())
}
// TestBlobSASURICorrectness uploads a blob into a private container, builds
// a read-only SAS URI for it, and verifies an unauthenticated HTTP GET of
// that URI returns the exact blob contents.
func (s *StorageBlobSuite) TestBlobSASURICorrectness(c *chk.C) {
	cli := getBlobClient(c)
	cnt := randContainer()
	blob := randString(20)
	body := []byte(randString(100))
	expiry := time.Now().UTC().Add(time.Hour)
	permissions := "r"

	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
	defer cli.DeleteContainer(cnt)

	c.Assert(cli.putSingleBlockBlob(cnt, blob, body), chk.IsNil)

	sasURI, err := cli.GetBlobSASURI(cnt, blob, expiry, permissions)
	c.Assert(err, chk.IsNil)

	resp, err := http.Get(sasURI)
	c.Assert(err, chk.IsNil)
	// Close immediately after the error check so the connection is not
	// leaked if ReadAll (or an assertion) fails below.
	defer resp.Body.Close()

	blobResp, err := ioutil.ReadAll(resp.Body)
	c.Assert(err, chk.IsNil)

	c.Assert(resp.StatusCode, chk.Equals, http.StatusOK)
	// Compare the full payload, not just its length, so that content
	// corruption is detected too.
	c.Assert(blobResp, chk.DeepEquals, body)
}
// TestListContainersPagination creates several prefixed containers and walks
// the container listing page by page, checking that no page exceeds the
// requested size and that every created container is seen exactly once.
func (s *StorageBlobSuite) TestListContainersPagination(c *chk.C) {
	cli := getBlobClient(c)
	c.Assert(deleteTestContainers(cli), chk.IsNil)

	const total = 5
	const pageSize = 2

	// Create the test containers.
	created := []string{}
	for i := 0; i < total; i++ {
		name := randContainer()
		c.Assert(cli.CreateContainer(name, ContainerAccessTypePrivate), chk.IsNil)
		created = append(created, name)
	}
	sort.Strings(created)

	// Remove them concurrently once the test finishes.
	defer func() {
		var wg sync.WaitGroup
		for _, cnt := range created {
			wg.Add(1)
			go func(name string) {
				defer wg.Done()
				c.Assert(cli.DeleteContainer(name), chk.IsNil)
			}(cnt)
		}
		wg.Wait()
	}()

	// Walk the pages until the continuation marker runs out.
	seen := []string{}
	marker := ""
	for {
		page, err := cli.ListContainers(ListContainersParameters{
			Prefix:     testContainerPrefix,
			MaxResults: pageSize,
			Marker:     marker})
		c.Assert(err, chk.IsNil)

		if len(page.Containers) > pageSize {
			c.Fatalf("Got a bigger page. Expected: %d, got: %d", pageSize, len(page.Containers))
		}
		for _, cnt := range page.Containers {
			seen = append(seen, cnt.Name)
		}

		marker = page.NextMarker
		if marker == "" || len(page.Containers) == 0 {
			break
		}
	}

	c.Assert(seen, chk.DeepEquals, created)
}
// TestContainerExists checks ContainerExists for both a missing and an
// existing container.
func (s *StorageBlobSuite) TestContainerExists(c *chk.C) {
	cnt := randContainer()
	cli := getBlobClient(c)

	// Not created yet: must report false with no error.
	exists, err := cli.ContainerExists(cnt)
	c.Assert(err, chk.IsNil)
	c.Assert(exists, chk.Equals, false)

	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypeBlob), chk.IsNil)
	defer cli.DeleteContainer(cnt)

	exists, err = cli.ContainerExists(cnt)
	c.Assert(err, chk.IsNil)
	c.Assert(exists, chk.Equals, true)
}
// TestCreateDeleteContainer performs a create followed by a delete and
// expects both operations to succeed.
func (s *StorageBlobSuite) TestCreateDeleteContainer(c *chk.C) {
	cli := getBlobClient(c)
	cnt := randContainer()
	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
	c.Assert(cli.DeleteContainer(cnt), chk.IsNil)
}
// TestCreateContainerIfNotExists verifies that the call reports true on first
// creation and false — without an error — when the container already exists.
func (s *StorageBlobSuite) TestCreateContainerIfNotExists(c *chk.C) {
	cnt := randContainer()
	cli := getBlobClient(c)

	// First call creates the container.
	created, err := cli.CreateContainerIfNotExists(cnt, ContainerAccessTypePrivate)
	c.Assert(err, chk.IsNil)
	c.Assert(created, chk.Equals, true)

	// Second call is a no-op and must not error.
	created, err = cli.CreateContainerIfNotExists(cnt, ContainerAccessTypePrivate)
	c.Assert(err, chk.IsNil)
	defer cli.DeleteContainer(cnt)
	c.Assert(created, chk.Equals, false)
}
// TestDeleteContainerIfExists verifies that deleting a missing container
// reports false without an error, while deleting an existing one reports true.
func (s *StorageBlobSuite) TestDeleteContainerIfExists(c *chk.C) {
	cnt := randContainer()
	cli := getBlobClient(c)

	// Nonexisting container: the plain delete fails, the IfExists variant
	// swallows the 404 and reports false.
	c.Assert(cli.DeleteContainer(cnt), chk.NotNil)
	deleted, err := cli.DeleteContainerIfExists(cnt)
	c.Assert(err, chk.IsNil)
	c.Assert(deleted, chk.Equals, false)

	// Existing container: the IfExists variant deletes it and reports true.
	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
	deleted, err = cli.DeleteContainerIfExists(cnt)
	c.Assert(err, chk.IsNil)
	c.Assert(deleted, chk.Equals, true)
}
// TestBlobExists uploads one blob and checks BlobExists against both a name
// that was never uploaded and the real one.
func (s *StorageBlobSuite) TestBlobExists(c *chk.C) {
	cnt := randContainer()
	blob := randString(20)
	cli := getBlobClient(c)

	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypeBlob), chk.IsNil)
	defer cli.DeleteContainer(cnt)
	c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte("Hello!")), chk.IsNil)
	defer cli.DeleteBlob(cnt, blob)

	// A name that was never uploaded must not exist.
	exists, err := cli.BlobExists(cnt, blob+".foo")
	c.Assert(err, chk.IsNil)
	c.Assert(exists, chk.Equals, false)

	exists, err = cli.BlobExists(cnt, blob)
	c.Assert(err, chk.IsNil)
	c.Assert(exists, chk.Equals, true)
}
// TestGetBlobURL checks canonical blob URLs, including the implicit $root
// container used when the container name is empty.
func (s *StorageBlobSuite) TestGetBlobURL(c *chk.C) {
	api, err := NewBasicClient("foo", "YmFy")
	c.Assert(err, chk.IsNil)
	cli := api.GetBlobService()

	c.Assert(cli.GetBlobURL("c", "nested/blob"), chk.Equals, "https://foo.blob.core.windows.net/c/nested/blob")
	c.Assert(cli.GetBlobURL("", "blob"), chk.Equals, "https://foo.blob.core.windows.net/$root/blob")
	c.Assert(cli.GetBlobURL("", "nested/blob"), chk.Equals, "https://foo.blob.core.windows.net/$root/nested/blob")
}
// TestBlobCopy copies a small blob within a container and verifies the
// destination contents match the source. Skipped with -short because the
// server-side copy is asynchronous and has no completion SLA.
func (s *StorageBlobSuite) TestBlobCopy(c *chk.C) {
	if testing.Short() {
		c.Skip("skipping blob copy in short mode, no SLA on async operation")
	}

	cli := getBlobClient(c)
	cnt := randContainer()
	src := randString(20)
	dst := randString(20)
	body := []byte(randString(1024))

	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
	// Use the exported DeleteContainer for cleanup, consistent with the
	// sibling tests; the unexported deleteContainer returns a raw response
	// whose body would be leaked here.
	defer cli.DeleteContainer(cnt)

	c.Assert(cli.putSingleBlockBlob(cnt, src, body), chk.IsNil)
	defer cli.DeleteBlob(cnt, src)

	c.Assert(cli.CopyBlob(cnt, dst, cli.GetBlobURL(cnt, src)), chk.IsNil)
	defer cli.DeleteBlob(cnt, dst)

	blobBody, err := cli.GetBlob(cnt, dst)
	c.Assert(err, chk.IsNil)
	// Close right after the error check so the stream is not leaked if
	// ReadAll (or an assertion) fails below.
	defer blobBody.Close()

	b, err := ioutil.ReadAll(blobBody)
	c.Assert(err, chk.IsNil)
	c.Assert(b, chk.DeepEquals, body)
}
// TestDeleteBlobIfExists checks that deleting a nonexisting blob errors with
// the plain call but reports (false, nil) with the IfExists variant.
func (s *StorageBlobSuite) TestDeleteBlobIfExists(c *chk.C) {
	cnt := randContainer()
	blob := randString(20)
	cli := getBlobClient(c)

	c.Assert(cli.DeleteBlob(cnt, blob), chk.NotNil)

	deleted, err := cli.DeleteBlobIfExists(cnt, blob)
	c.Assert(err, chk.IsNil)
	c.Assert(deleted, chk.Equals, false)
}
// TestGetBlobProperties verifies that properties of a missing blob error,
// and that an uploaded block blob reports the right length and type.
func (s *StorageBlobSuite) TestGetBlobProperties(c *chk.C) {
	cnt := randContainer()
	blob := randString(20)
	contents := randString(64)

	cli := getBlobClient(c)
	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
	defer cli.DeleteContainer(cnt)

	// A blob that was never uploaded must error.
	_, err := cli.GetBlobProperties(cnt, blob)
	c.Assert(err, chk.NotNil)

	c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte(contents)), chk.IsNil)

	props, err := cli.GetBlobProperties(cnt, blob)
	c.Assert(err, chk.IsNil)
	c.Assert(props.ContentLength, chk.Equals, int64(len(contents)))
	c.Assert(props.BlobType, chk.Equals, BlobTypeBlock)
}
// TestListBlobsPagination uploads n blobs and pages through ListBlobs with a
// small MaxResults, following NextMarker until exhausted, verifying that
// pagination yields every blob exactly once and in sorted order.
func (s *StorageBlobSuite) TestListBlobsPagination(c *chk.C) {
	cli := getBlobClient(c)
	cnt := randContainer()

	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
	defer cli.DeleteContainer(cnt)

	blobs := []string{}
	const n = 5
	const pageSize = 2
	for i := 0; i < n; i++ {
		name := randString(20)
		c.Assert(cli.putSingleBlockBlob(cnt, name, []byte("Hello, world!")), chk.IsNil)
		blobs = append(blobs, name)
	}
	// Sort locally; the comparison below relies on the service listing
	// blobs in lexicographic order.
	sort.Strings(blobs)

	// Paginate
	seen := []string{}
	marker := ""
	for {
		resp, err := cli.ListBlobs(cnt, ListBlobsParameters{
			MaxResults: pageSize,
			Marker:     marker})
		c.Assert(err, chk.IsNil)

		for _, v := range resp.Blobs {
			seen = append(seen, v.Name)
		}

		marker = resp.NextMarker
		if marker == "" || len(resp.Blobs) == 0 {
			break
		}
	}

	// Compare
	c.Assert(seen, chk.DeepEquals, blobs)
}
// TestPutEmptyBlockBlob uploads a zero-length block blob and verifies the
// service reports a zero content length for it.
func (s *StorageBlobSuite) TestPutEmptyBlockBlob(c *chk.C) {
	cli := getBlobClient(c)
	cnt := randContainer()
	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
	defer cli.deleteContainer(cnt)

	blob := randString(20)
	c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte{}), chk.IsNil)

	props, err := cli.GetBlobProperties(cnt, blob)
	c.Assert(err, chk.IsNil)
	// The original asserted chk.Not(chk.Equals) against untyped 0, which
	// always passed: ContentLength is int64 and gocheck's Equals checker is
	// type-sensitive, so int64 never equals int. Pin the real expectation.
	c.Assert(props.ContentLength, chk.Equals, int64(0))
}
// TestGetBlobRange uploads a small blob and fetches several byte ranges
// (closed and open-ended), verifying each returned slice of the body.
func (s *StorageBlobSuite) TestGetBlobRange(c *chk.C) {
	cnt := randContainer()
	blob := randString(20)
	body := "0123456789"

	cli := getBlobClient(c)
	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypeBlob), chk.IsNil)
	defer cli.DeleteContainer(cnt)

	c.Assert(cli.putSingleBlockBlob(cnt, blob, []byte(body)), chk.IsNil)
	defer cli.DeleteBlob(cnt, blob)

	// Read 1-3
	for _, r := range []struct {
		rangeStr string
		expected string
	}{
		{"0-", body},
		{"1-3", body[1 : 3+1]},
		{"3-", body[3:]},
	} {
		resp, err := cli.GetBlobRange(cnt, blob, r.rangeStr)
		c.Assert(err, chk.IsNil)

		// NOTE(review): resp is never closed here; consider closing each
		// range body once read.
		blobBody, err := ioutil.ReadAll(resp)
		c.Assert(err, chk.IsNil)

		str := string(blobBody)
		c.Assert(str, chk.Equals, r.expected)
	}
}
// TestPutBlock uploads a single uncommitted block and only checks that the
// call succeeds; committing is covered by TestGetBlockList_PutBlockList.
func (s *StorageBlobSuite) TestPutBlock(c *chk.C) {
	cli := getBlobClient(c)
	cnt := randContainer()
	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
	defer cli.deleteContainer(cnt)

	blob := randString(20)
	chunk := []byte(randString(1024))
	// Block IDs are sent base64-encoded.
	blockID := base64.StdEncoding.EncodeToString([]byte("foo"))
	c.Assert(cli.PutBlock(cnt, blob, blockID, chunk), chk.IsNil)
}
// TestGetBlockList_PutBlockList walks the block-blob lifecycle: upload an
// uncommitted block, observe it via GetBlockList, commit it with
// PutBlockList, then verify it appears as the single committed block with
// the expected name and size.
func (s *StorageBlobSuite) TestGetBlockList_PutBlockList(c *chk.C) {
	cli := getBlobClient(c)
	cnt := randContainer()
	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
	defer cli.deleteContainer(cnt)

	blob := randString(20)
	chunk := []byte(randString(1024))
	blockID := base64.StdEncoding.EncodeToString([]byte("foo"))

	// Put one block
	c.Assert(cli.PutBlock(cnt, blob, blockID, chunk), chk.IsNil)
	defer cli.deleteBlob(cnt, blob)

	// Get committed blocks; nothing has been committed yet.
	committed, err := cli.GetBlockList(cnt, blob, BlockListTypeCommitted)
	c.Assert(err, chk.IsNil)

	if len(committed.CommittedBlocks) > 0 {
		c.Fatal("There are committed blocks")
	}

	// Get uncommitted blocks
	uncommitted, err := cli.GetBlockList(cnt, blob, BlockListTypeUncommitted)
	c.Assert(err, chk.IsNil)

	c.Assert(len(uncommitted.UncommittedBlocks), chk.Equals, 1)

	// Commit block list
	c.Assert(cli.PutBlockList(cnt, blob, []Block{{blockID, BlockStatusUncommitted}}), chk.IsNil)

	// Get all blocks; the block must have moved from uncommitted to committed.
	all, err := cli.GetBlockList(cnt, blob, BlockListTypeAll)
	c.Assert(err, chk.IsNil)
	c.Assert(len(all.CommittedBlocks), chk.Equals, 1)
	c.Assert(len(all.UncommittedBlocks), chk.Equals, 0)

	// Verify the block
	thatBlock := all.CommittedBlocks[0]
	c.Assert(thatBlock.Name, chk.Equals, blockID)
	c.Assert(thatBlock.Size, chk.Equals, int64(len(chunk)))
}
// TestCreateBlockBlob creates an empty block blob and verifies it has no
// committed or uncommitted blocks.
func (s *StorageBlobSuite) TestCreateBlockBlob(c *chk.C) {
	cli := getBlobClient(c)
	cnt := randContainer()
	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
	defer cli.deleteContainer(cnt)

	blob := randString(20)
	c.Assert(cli.CreateBlockBlob(cnt, blob), chk.IsNil)

	// Verify
	blocks, err := cli.GetBlockList(cnt, blob, BlockListTypeAll)
	c.Assert(err, chk.IsNil)
	c.Assert(len(blocks.CommittedBlocks), chk.Equals, 0)
	c.Assert(len(blocks.UncommittedBlocks), chk.Equals, 0)
}
// TestPutPageBlob allocates a 10 MB page blob and verifies the reported
// content length and blob type.
func (s *StorageBlobSuite) TestPutPageBlob(c *chk.C) {
	cli := getBlobClient(c)
	cnt := randContainer()
	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
	defer cli.deleteContainer(cnt)

	blob := randString(20)
	size := int64(10 * 1024 * 1024)
	c.Assert(cli.PutPageBlob(cnt, blob, size), chk.IsNil)

	// Verify
	props, err := cli.GetBlobProperties(cnt, blob)
	c.Assert(err, chk.IsNil)
	c.Assert(props.ContentLength, chk.Equals, size)
	c.Assert(props.BlobType, chk.Equals, BlobTypePage)
}
// TestPutPagesUpdate writes two adjacent page ranges into a page blob,
// verifies the concatenated contents, then overwrites the first 512 bytes
// and verifies the update took effect.
func (s *StorageBlobSuite) TestPutPagesUpdate(c *chk.C) {
	cli := getBlobClient(c)
	cnt := randContainer()
	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
	defer cli.deleteContainer(cnt)

	blob := randString(20)
	size := int64(10 * 1024 * 1024) // larger than we'll use

	c.Assert(cli.PutPageBlob(cnt, blob, size), chk.IsNil)

	chunk1 := []byte(randString(1024))
	chunk2 := []byte(randString(512))

	// Append chunks
	c.Assert(cli.PutPage(cnt, blob, 0, int64(len(chunk1)-1), PageWriteTypeUpdate, chunk1), chk.IsNil)
	c.Assert(cli.PutPage(cnt, blob, int64(len(chunk1)), int64(len(chunk1)+len(chunk2)-1), PageWriteTypeUpdate, chunk2), chk.IsNil)

	// Verify contents
	out, err := cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)+len(chunk2)-1))
	c.Assert(err, chk.IsNil)
	defer out.Close()

	blobContents, err := ioutil.ReadAll(out)
	c.Assert(err, chk.IsNil)
	c.Assert(blobContents, chk.DeepEquals, append(chunk1, chunk2...))
	// NOTE(review): the deferred Close above will close this body a second
	// time at function exit — presumably a harmless double close, but
	// redundant; confirm.
	out.Close()

	// Overwrite first half of chunk1
	chunk0 := []byte(randString(512))
	c.Assert(cli.PutPage(cnt, blob, 0, int64(len(chunk0)-1), PageWriteTypeUpdate, chunk0), chk.IsNil)

	// Verify contents
	out, err = cli.GetBlobRange(cnt, blob, fmt.Sprintf("%v-%v", 0, len(chunk1)+len(chunk2)-1))
	c.Assert(err, chk.IsNil)
	defer out.Close()

	blobContents, err = ioutil.ReadAll(out)
	c.Assert(err, chk.IsNil)
	// Expected layout: chunk0 over the first 512 bytes, the tail of chunk1,
	// then chunk2.
	c.Assert(blobContents, chk.DeepEquals, append(append(chunk0, chunk1[512:]...), chunk2...))
}
// TestPutPagesClear writes a 2 KB page range, clears the middle 512 bytes
// with PageWriteTypeClear, and verifies the cleared region reads back as
// zeros.
func (s *StorageBlobSuite) TestPutPagesClear(c *chk.C) {
	cli := getBlobClient(c)
	cnt := randContainer()
	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
	defer cli.deleteContainer(cnt)

	blob := randString(20)
	size := int64(10 * 1024 * 1024) // larger than we'll use

	c.Assert(cli.PutPageBlob(cnt, blob, size), chk.IsNil)

	// Put 0-2047
	chunk := []byte(randString(2048))
	c.Assert(cli.PutPage(cnt, blob, 0, 2047, PageWriteTypeUpdate, chunk), chk.IsNil)

	// Clear 512-1023
	c.Assert(cli.PutPage(cnt, blob, 512, 1023, PageWriteTypeClear, nil), chk.IsNil)

	// Verify contents: first 512 bytes intact, 512 zero bytes, then the
	// remainder of the original chunk.
	out, err := cli.GetBlobRange(cnt, blob, "0-2047")
	c.Assert(err, chk.IsNil)
	contents, err := ioutil.ReadAll(out)
	c.Assert(err, chk.IsNil)
	defer out.Close()
	c.Assert(contents, chk.DeepEquals, append(append(chunk[:512], make([]byte, 512)...), chunk[1024:]...))
}
// TestGetPageRanges verifies GetPageRanges tracks the number of distinct
// valid page ranges as pages are written: zero on a fresh blob, one after the
// first write, two after a non-adjacent write.
func (s *StorageBlobSuite) TestGetPageRanges(c *chk.C) {
	cli := getBlobClient(c)
	cnt := randContainer()
	c.Assert(cli.CreateContainer(cnt, ContainerAccessTypePrivate), chk.IsNil)
	defer cli.deleteContainer(cnt)

	blob := randString(20)
	size := int64(10 * 1024 * 1024) // larger than we'll use

	c.Assert(cli.PutPageBlob(cnt, blob, size), chk.IsNil)

	// Get page ranges on empty blob
	out, err := cli.GetPageRanges(cnt, blob)
	c.Assert(err, chk.IsNil)
	c.Assert(len(out.PageList), chk.Equals, 0)

	// Add 0-512 page
	c.Assert(cli.PutPage(cnt, blob, 0, 511, PageWriteTypeUpdate, []byte(randString(512))), chk.IsNil)

	out, err = cli.GetPageRanges(cnt, blob)
	c.Assert(err, chk.IsNil)
	c.Assert(len(out.PageList), chk.Equals, 1)

	// Add 1024-2048; the gap makes this a second, separate range.
	c.Assert(cli.PutPage(cnt, blob, 1024, 2047, PageWriteTypeUpdate, []byte(randString(1024))), chk.IsNil)

	out, err = cli.GetPageRanges(cnt, blob)
	c.Assert(err, chk.IsNil)
	c.Assert(len(out.PageList), chk.Equals, 2)
}
// deleteTestContainers removes every container whose name carries the test
// prefix, re-listing until no matching containers remain.
func deleteTestContainers(cli BlobStorageClient) error {
	for {
		resp, err := cli.ListContainers(ListContainersParameters{Prefix: testContainerPrefix})
		if err != nil {
			return err
		}
		if len(resp.Containers) == 0 {
			return nil
		}
		for _, cnt := range resp.Containers {
			if err := cli.DeleteContainer(cnt.Name); err != nil {
				return err
			}
		}
	}
}
// putSingleBlockBlob uploads chunk as the entire contents of a block blob in
// a single Put Blob request. chunk must fit in one block (MaxBlobBlockSize);
// larger payloads must go through PutBlock/PutBlockList.
func (b BlobStorageClient) putSingleBlockBlob(container, name string, chunk []byte) error {
	if len(chunk) > MaxBlobBlockSize {
		return fmt.Errorf("storage: provided chunk (%d bytes) cannot fit into single-block blob (max %d bytes)", len(chunk), MaxBlobBlockSize)
	}

	uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{})
	headers := b.client.getStandardHeaders()
	headers["x-ms-blob-type"] = string(BlobTypeBlock)
	headers["Content-Length"] = fmt.Sprintf("%v", len(chunk))

	resp, err := b.client.exec("PUT", uri, headers, bytes.NewReader(chunk))
	if err != nil {
		return err
	}
	// Close the response body so the underlying connection can be reused;
	// the previous version leaked it (every queue.go method closes it).
	defer resp.body.Close()
	return checkRespCode(resp.statusCode, []int{http.StatusCreated})
}
// randContainer returns a random 32-character container name carrying
// testContainerPrefix so deleteTestContainers can find and remove it.
func randContainer() string {
	return testContainerPrefix + randString(32-len(testContainerPrefix))
}
// randString returns a random string of n lowercase alphanumeric characters.
// It panics if n is not positive or if the random source fails.
func randString(n int) string {
	if n <= 0 {
		// Previous message said "negative number", which was wrong for n == 0.
		panic("randString: n must be positive")
	}
	const alphanum = "0123456789abcdefghijklmnopqrstuvwxyz"
	b := make([]byte, n)
	if _, err := rand.Read(b); err != nil {
		// The previous version ignored this error, which could silently
		// yield all-zero (non-random) names.
		panic(err)
	}
	for i, v := range b {
		b[i] = alphanum[v%byte(len(alphanum))]
	}
	return string(b)
}

View file

@ -1,3 +1,4 @@
// Package storage provides clients for Microsoft Azure Storage Services.
package storage package storage
import ( import (
@ -15,22 +16,28 @@ import (
) )
const ( const (
DefaultBaseUrl = "core.windows.net" // DefaultBaseURL is the domain name used for storage requests when a
DefaultApiVersion = "2014-02-14" // default client is created.
defaultUseHttps = true DefaultBaseURL = "core.windows.net"
// DefaultAPIVersion is the Azure Storage API version string used when a
// basic client is created.
DefaultAPIVersion = "2014-02-14"
defaultUseHTTPS = true
blobServiceName = "blob" blobServiceName = "blob"
tableServiceName = "table" tableServiceName = "table"
queueServiceName = "queue" queueServiceName = "queue"
) )
// StorageClient is the object that needs to be constructed // Client is the object that needs to be constructed to perform
// to perform operations on the storage account. // operations on the storage account.
type StorageClient struct { type Client struct {
accountName string accountName string
accountKey []byte accountKey []byte
useHttps bool useHTTPS bool
baseUrl string baseURL string
apiVersion string apiVersion string
} }
@ -40,10 +47,10 @@ type storageResponse struct {
body io.ReadCloser body io.ReadCloser
} }
// StorageServiceError contains fields of the error response from // AzureStorageServiceError contains fields of the error response from
// Azure Storage Service REST API. See https://msdn.microsoft.com/en-us/library/azure/dd179382.aspx // Azure Storage Service REST API. See https://msdn.microsoft.com/en-us/library/azure/dd179382.aspx
// Some fields might be specific to certain calls. // Some fields might be specific to certain calls.
type StorageServiceError struct { type AzureStorageServiceError struct {
Code string `xml:"Code"` Code string `xml:"Code"`
Message string `xml:"Message"` Message string `xml:"Message"`
AuthenticationErrorDetail string `xml:"AuthenticationErrorDetail"` AuthenticationErrorDetail string `xml:"AuthenticationErrorDetail"`
@ -51,25 +58,43 @@ type StorageServiceError struct {
QueryParameterValue string `xml:"QueryParameterValue"` QueryParameterValue string `xml:"QueryParameterValue"`
Reason string `xml:"Reason"` Reason string `xml:"Reason"`
StatusCode int StatusCode int
RequestId string RequestID string
} }
// NewBasicClient constructs a StorageClient with given storage service name // UnexpectedStatusCodeError is returned when a storage service responds with neither an error
// and key. // nor with an HTTP status code indicating success.
func NewBasicClient(accountName, accountKey string) (StorageClient, error) { type UnexpectedStatusCodeError struct {
return NewClient(accountName, accountKey, DefaultBaseUrl, DefaultApiVersion, defaultUseHttps) allowed []int
got int
} }
// NewClient constructs a StorageClient. This should be used if the caller func (e UnexpectedStatusCodeError) Error() string {
// wants to specify whether to use HTTPS, a specific REST API version or a s := func(i int) string { return fmt.Sprintf("%d %s", i, http.StatusText(i)) }
// custom storage endpoint than Azure Public Cloud.
func NewClient(accountName, accountKey, blobServiceBaseUrl, apiVersion string, useHttps bool) (StorageClient, error) { got := s(e.got)
var c StorageClient expected := []string{}
for _, v := range e.allowed {
expected = append(expected, s(v))
}
return fmt.Sprintf("storage: status code from service response is %s; was expecting %s", got, strings.Join(expected, " or "))
}
// NewBasicClient constructs a Client with given storage service name and
// key.
func NewBasicClient(accountName, accountKey string) (Client, error) {
return NewClient(accountName, accountKey, DefaultBaseURL, DefaultAPIVersion, defaultUseHTTPS)
}
// NewClient constructs a Client. This should be used if the caller wants
// to specify whether to use HTTPS, a specific REST API version or a custom
// storage endpoint than Azure Public Cloud.
func NewClient(accountName, accountKey, blobServiceBaseURL, apiVersion string, useHTTPS bool) (Client, error) {
var c Client
if accountName == "" { if accountName == "" {
return c, fmt.Errorf("azure: account name required") return c, fmt.Errorf("azure: account name required")
} else if accountKey == "" { } else if accountKey == "" {
return c, fmt.Errorf("azure: account key required") return c, fmt.Errorf("azure: account key required")
} else if blobServiceBaseUrl == "" { } else if blobServiceBaseURL == "" {
return c, fmt.Errorf("azure: base storage service url required") return c, fmt.Errorf("azure: base storage service url required")
} }
@ -78,22 +103,22 @@ func NewClient(accountName, accountKey, blobServiceBaseUrl, apiVersion string, u
return c, err return c, err
} }
return StorageClient{ return Client{
accountName: accountName, accountName: accountName,
accountKey: key, accountKey: key,
useHttps: useHttps, useHTTPS: useHTTPS,
baseUrl: blobServiceBaseUrl, baseURL: blobServiceBaseURL,
apiVersion: apiVersion, apiVersion: apiVersion,
}, nil }, nil
} }
func (c StorageClient) getBaseUrl(service string) string { func (c Client) getBaseURL(service string) string {
scheme := "http" scheme := "http"
if c.useHttps { if c.useHTTPS {
scheme = "https" scheme = "https"
} }
host := fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseUrl) host := fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseURL)
u := &url.URL{ u := &url.URL{
Scheme: scheme, Scheme: scheme,
@ -101,8 +126,8 @@ func (c StorageClient) getBaseUrl(service string) string {
return u.String() return u.String()
} }
func (c StorageClient) getEndpoint(service, path string, params url.Values) string { func (c Client) getEndpoint(service, path string, params url.Values) string {
u, err := url.Parse(c.getBaseUrl(service)) u, err := url.Parse(c.getBaseURL(service))
if err != nil { if err != nil {
// really should not be happening // really should not be happening
panic(err) panic(err)
@ -117,18 +142,24 @@ func (c StorageClient) getEndpoint(service, path string, params url.Values) stri
return u.String() return u.String()
} }
// GetBlobService returns a BlobStorageClient which can operate on the // GetBlobService returns a BlobStorageClient which can operate on the blob
// blob service of the storage account. // service of the storage account.
func (c StorageClient) GetBlobService() *BlobStorageClient { func (c Client) GetBlobService() BlobStorageClient {
return &BlobStorageClient{c} return BlobStorageClient{c}
} }
func (c StorageClient) createAuthorizationHeader(canonicalizedString string) string { // GetQueueService returns a QueueServiceClient which can operate on the queue
// service of the storage account.
func (c Client) GetQueueService() QueueServiceClient {
return QueueServiceClient{c}
}
func (c Client) createAuthorizationHeader(canonicalizedString string) string {
signature := c.computeHmac256(canonicalizedString) signature := c.computeHmac256(canonicalizedString)
return fmt.Sprintf("%s %s:%s", "SharedKey", c.accountName, signature) return fmt.Sprintf("%s %s:%s", "SharedKey", c.accountName, signature)
} }
func (c StorageClient) getAuthorizationHeader(verb, url string, headers map[string]string) (string, error) { func (c Client) getAuthorizationHeader(verb, url string, headers map[string]string) (string, error) {
canonicalizedResource, err := c.buildCanonicalizedResource(url) canonicalizedResource, err := c.buildCanonicalizedResource(url)
if err != nil { if err != nil {
return "", err return "", err
@ -138,14 +169,14 @@ func (c StorageClient) getAuthorizationHeader(verb, url string, headers map[stri
return c.createAuthorizationHeader(canonicalizedString), nil return c.createAuthorizationHeader(canonicalizedString), nil
} }
func (c StorageClient) getStandardHeaders() map[string]string { func (c Client) getStandardHeaders() map[string]string {
return map[string]string{ return map[string]string{
"x-ms-version": c.apiVersion, "x-ms-version": c.apiVersion,
"x-ms-date": currentTimeRfc1123Formatted(), "x-ms-date": currentTimeRfc1123Formatted(),
} }
} }
func (c StorageClient) buildCanonicalizedHeader(headers map[string]string) string { func (c Client) buildCanonicalizedHeader(headers map[string]string) string {
cm := make(map[string]string) cm := make(map[string]string)
for k, v := range headers { for k, v := range headers {
@ -179,7 +210,7 @@ func (c StorageClient) buildCanonicalizedHeader(headers map[string]string) strin
return ch return ch
} }
func (c StorageClient) buildCanonicalizedResource(uri string) (string, error) { func (c Client) buildCanonicalizedResource(uri string) (string, error) {
errMsg := "buildCanonicalizedResource error: %s" errMsg := "buildCanonicalizedResource error: %s"
u, err := url.Parse(uri) u, err := url.Parse(uri)
if err != nil { if err != nil {
@ -220,7 +251,7 @@ func (c StorageClient) buildCanonicalizedResource(uri string) (string, error) {
return cr, nil return cr, nil
} }
func (c StorageClient) buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string) string { func (c Client) buildCanonicalizedString(verb string, headers map[string]string, canonicalizedResource string) string {
canonicalizedString := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s", canonicalizedString := fmt.Sprintf("%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s",
verb, verb,
headers["Content-Encoding"], headers["Content-Encoding"],
@ -240,7 +271,7 @@ func (c StorageClient) buildCanonicalizedString(verb string, headers map[string]
return canonicalizedString return canonicalizedString
} }
func (c StorageClient) exec(verb, url string, headers map[string]string, body io.Reader) (*storageResponse, error) { func (c Client) exec(verb, url string, headers map[string]string, body io.Reader) (*storageResponse, error) {
authHeader, err := c.getAuthorizationHeader(verb, url, headers) authHeader, err := c.getAuthorizationHeader(verb, url, headers)
if err != nil { if err != nil {
return nil, err return nil, err
@ -271,10 +302,10 @@ func (c StorageClient) exec(verb, url string, headers map[string]string, body io
if len(respBody) == 0 { if len(respBody) == 0 {
// no error in response body // no error in response body
err = fmt.Errorf("storage: service returned without a response body (%s).", resp.Status) err = fmt.Errorf("storage: service returned without a response body (%s)", resp.Status)
} else { } else {
// response contains storage service error object, unmarshal // response contains storage service error object, unmarshal
storageErr, errIn := serviceErrFromXml(respBody, resp.StatusCode, resp.Header.Get("x-ms-request-id")) storageErr, errIn := serviceErrFromXML(respBody, resp.StatusCode, resp.Header.Get("x-ms-request-id"))
if err != nil { // error unmarshaling the error response if err != nil { // error unmarshaling the error response
err = errIn err = errIn
} }
@ -302,16 +333,27 @@ func readResponseBody(resp *http.Response) ([]byte, error) {
return out, err return out, err
} }
func serviceErrFromXml(body []byte, statusCode int, requestId string) (StorageServiceError, error) { func serviceErrFromXML(body []byte, statusCode int, requestID string) (AzureStorageServiceError, error) {
var storageErr StorageServiceError var storageErr AzureStorageServiceError
if err := xml.Unmarshal(body, &storageErr); err != nil { if err := xml.Unmarshal(body, &storageErr); err != nil {
return storageErr, err return storageErr, err
} }
storageErr.StatusCode = statusCode storageErr.StatusCode = statusCode
storageErr.RequestId = requestId storageErr.RequestID = requestID
return storageErr, nil return storageErr, nil
} }
func (e StorageServiceError) Error() string { func (e AzureStorageServiceError) Error() string {
return fmt.Sprintf("storage: remote server returned error. StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s", e.StatusCode, e.Code, e.Message, e.RequestId) return fmt.Sprintf("storage: service returned error: StatusCode=%d, ErrorCode=%s, ErrorMessage=%s, RequestId=%s", e.StatusCode, e.Code, e.Message, e.RequestID)
}
// checkRespCode returns UnexpectedStatusError if the given response code is not
// one of the allowed status codes; otherwise nil.
func checkRespCode(respCode int, allowed []int) error {
for _, v := range allowed {
if respCode == v {
return nil
}
}
return UnexpectedStatusCodeError{allowed, respCode}
} }

View file

@ -0,0 +1,156 @@
package storage
import (
"encoding/base64"
"net/url"
"os"
"testing"
chk "gopkg.in/check.v1"
)
// Test hooks gocheck's suite runner into the standard "go test" machinery.
func Test(t *testing.T) { chk.TestingT(t) }
// StorageClientSuite groups the unit tests for the base storage Client.
type StorageClientSuite struct{}

// Register the suite with gocheck.
var _ = chk.Suite(&StorageClientSuite{})
// getBasicClient builds a Client from the ACCOUNT_NAME/ACCOUNT_KEY
// environment variables, failing the test immediately if either is unset.
func getBasicClient(c *chk.C) Client {
	accountName, accountKey := os.Getenv("ACCOUNT_NAME"), os.Getenv("ACCOUNT_KEY")
	if accountName == "" {
		c.Fatal("ACCOUNT_NAME not set, need an empty storage account to test")
	}
	if accountKey == "" {
		c.Fatal("ACCOUNT_KEY not set")
	}
	client, err := NewBasicClient(accountName, accountKey)
	c.Assert(err, chk.IsNil)
	return client
}
// TestGetBaseURL_Basic_Https checks that a basic client defaults to the
// public-cloud endpoint, the default API version and HTTPS.
func (s *StorageClientSuite) TestGetBaseURL_Basic_Https(c *chk.C) {
	cli, err := NewBasicClient("foo", "YmFy")
	c.Assert(err, chk.IsNil)
	// The original repeated the err assertion; asserting once is enough.
	c.Assert(cli.apiVersion, chk.Equals, DefaultAPIVersion)
	c.Assert(cli.getBaseURL("table"), chk.Equals, "https://foo.table.core.windows.net")
}
// TestGetBaseURL_Custom_NoHttps ensures a custom base URL, API version and
// plain-HTTP setting are honored when building service base URLs.
func (s *StorageClientSuite) TestGetBaseURL_Custom_NoHttps(c *chk.C) {
	const apiVersion = "2015-01-01" // a non existing one
	client, err := NewClient("foo", "YmFy", "core.chinacloudapi.cn", apiVersion, false)
	c.Assert(err, chk.IsNil)
	c.Assert(client.apiVersion, chk.Equals, apiVersion)
	c.Assert(client.getBaseURL("table"), chk.Equals, "http://foo.table.core.chinacloudapi.cn")
}
// TestGetEndpoint_None verifies the endpoint URL built when neither a path
// nor query parameters are supplied.
func (s *StorageClientSuite) TestGetEndpoint_None(c *chk.C) {
	client, err := NewBasicClient("foo", "YmFy")
	c.Assert(err, chk.IsNil)
	got := client.getEndpoint(blobServiceName, "", url.Values{})
	c.Assert(got, chk.Equals, "https://foo.blob.core.windows.net/")
}
// TestGetEndpoint_PathOnly verifies the endpoint URL built from a path with
// no query parameters.
func (s *StorageClientSuite) TestGetEndpoint_PathOnly(c *chk.C) {
	client, err := NewBasicClient("foo", "YmFy")
	c.Assert(err, chk.IsNil)
	got := client.getEndpoint(blobServiceName, "path", url.Values{})
	c.Assert(got, chk.Equals, "https://foo.blob.core.windows.net/path")
}
// TestGetEndpoint_ParamsOnly verifies the endpoint URL built from query
// parameters with no path; parameters are emitted in sorted key order.
func (s *StorageClientSuite) TestGetEndpoint_ParamsOnly(c *chk.C) {
	client, err := NewBasicClient("foo", "YmFy")
	c.Assert(err, chk.IsNil)
	query := url.Values{"a": {"b"}, "c": {"d"}}
	got := client.getEndpoint(blobServiceName, "", query)
	c.Assert(got, chk.Equals, "https://foo.blob.core.windows.net/?a=b&c=d")
}
// TestGetEndpoint_Mixed verifies the endpoint URL built from both a path and
// query parameters.
func (s *StorageClientSuite) TestGetEndpoint_Mixed(c *chk.C) {
	client, err := NewBasicClient("foo", "YmFy")
	c.Assert(err, chk.IsNil)
	query := url.Values{"a": {"b"}, "c": {"d"}}
	got := client.getEndpoint(blobServiceName, "path", query)
	c.Assert(got, chk.Equals, "https://foo.blob.core.windows.net/path?a=b&c=d")
}
// Test_getStandardHeaders checks that exactly the API-version and date
// headers are produced for every request.
func (s *StorageClientSuite) Test_getStandardHeaders(c *chk.C) {
	client, err := NewBasicClient("foo", "YmFy")
	c.Assert(err, chk.IsNil)

	got := client.getStandardHeaders()
	c.Assert(len(got), chk.Equals, 2)
	c.Assert(got["x-ms-version"], chk.Equals, client.apiVersion)
	_, dateSet := got["x-ms-date"]
	if !dateSet {
		c.Fatal("Missing date header")
	}
}
// Test_buildCanonicalizedResource pins the canonicalized-resource string
// (account, path, then sorted query parameters) used for request signing.
func (s *StorageClientSuite) Test_buildCanonicalizedResource(c *chk.C) {
	client, err := NewBasicClient("foo", "YmFy")
	c.Assert(err, chk.IsNil)

	cases := []struct {
		url      string
		expected string
	}{
		{"https://foo.blob.core.windows.net/path?a=b&c=d", "/foo/path\na:b\nc:d"},
		{"https://foo.blob.core.windows.net/?comp=list", "/foo/\ncomp:list"},
		{"https://foo.blob.core.windows.net/cnt/blob", "/foo/cnt/blob"},
	}
	for _, tc := range cases {
		got, err := client.buildCanonicalizedResource(tc.url)
		c.Assert(err, chk.IsNil)
		c.Assert(got, chk.Equals, tc.expected)
	}
}
// Test_buildCanonicalizedHeader verifies that only x-ms-* headers are kept
// and that they are serialized sorted by header name.
func (s *StorageClientSuite) Test_buildCanonicalizedHeader(c *chk.C) {
	client, err := NewBasicClient("foo", "YmFy")
	c.Assert(err, chk.IsNil)

	cases := []struct {
		headers  map[string]string
		expected string
	}{
		{map[string]string{}, ""},
		{map[string]string{"x-ms-foo": "bar"}, "x-ms-foo:bar"},
		{map[string]string{"foo:": "bar"}, ""},
		{map[string]string{"foo:": "bar", "x-ms-foo": "bar"}, "x-ms-foo:bar"},
		{map[string]string{
			"x-ms-version":   "9999-99-99",
			"x-ms-blob-type": "BlockBlob"}, "x-ms-blob-type:BlockBlob\nx-ms-version:9999-99-99"},
	}
	for _, tc := range cases {
		c.Assert(client.buildCanonicalizedHeader(tc.headers), chk.Equals, tc.expected)
	}
}
// TestReturnsStorageServiceError checks that a failing service call (here,
// deleting a container that was never created) surfaces a typed
// AzureStorageServiceError with the REST API's status and error code.
func (s *StorageClientSuite) TestReturnsStorageServiceError(c *chk.C) {
	_, err := getBlobClient(c).deleteContainer(randContainer())
	c.Assert(err, chk.NotNil)

	serviceErr, ok := err.(AzureStorageServiceError)
	c.Check(ok, chk.Equals, true)
	c.Assert(serviceErr.StatusCode, chk.Equals, 404)
	c.Assert(serviceErr.Code, chk.Equals, "ContainerNotFound")
	c.Assert(serviceErr.Code, chk.Not(chk.Equals), "")
}
// Test_createAuthorizationHeader pins the SharedKey header format against a
// precomputed signature for a known key and canonicalized string.
func (s *StorageClientSuite) Test_createAuthorizationHeader(c *chk.C) {
	key := base64.StdEncoding.EncodeToString([]byte("bar"))
	client, err := NewBasicClient("foo", key)
	c.Assert(err, chk.IsNil)

	input := `foobarzoo`
	want := `SharedKey foo:h5U0ATVX6SpbFX1H6GNuxIMeXXCILLoIvhflPtuQZ30=`
	c.Assert(client.createAuthorizationHeader(input), chk.Equals, want)
}

View file

@ -0,0 +1,230 @@
package storage
import (
"encoding/xml"
"fmt"
"net/http"
"net/url"
"strconv"
)
// QueueServiceClient contains operations for Microsoft Azure Queue Storage
// Service. It is obtained via Client.GetQueueService and shares the parent
// client's credentials and endpoint configuration.
type QueueServiceClient struct {
	client Client
}
// pathForQueue returns the resource path for queue-level operations.
func pathForQueue(queue string) string { return "/" + queue }

// pathForQueueMessages returns the resource path for the messages collection
// of the given queue.
func pathForQueueMessages(queue string) string { return "/" + queue + "/messages" }

// pathForMessage returns the resource path addressing a single message within
// the given queue.
//
// Plain concatenation replaces fmt.Sprintf in all three helpers: for fixed
// "/%s"-style templates it is equivalent, avoids boxing the arguments, and
// reads no worse.
func pathForMessage(queue, name string) string { return "/" + queue + "/messages/" + name }
// putMessageRequest is the XML request body for the Put Message operation.
type putMessageRequest struct {
	XMLName     xml.Name `xml:"QueueMessage"`
	MessageText string   `xml:"MessageText"`
}
// PutMessageParameters is the set of options that can be specified for the
// Put Message operation. A zero struct does not use any preferences for the
// request.
type PutMessageParameters struct {
	VisibilityTimeout int
	MessageTTL        int
}
// getParameters serializes the non-zero fields into query-string values for
// the Put Message request; zero-valued fields are omitted entirely.
func (p PutMessageParameters) getParameters() url.Values {
	params := url.Values{}
	if p.VisibilityTimeout != 0 {
		params.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout))
	}
	if p.MessageTTL != 0 {
		params.Set("messagettl", strconv.Itoa(p.MessageTTL))
	}
	return params
}
// GetMessagesParameters is the set of options that can be specified for the
// Get Messages operation. A zero struct does not use any preferences for the
// request.
type GetMessagesParameters struct {
	NumOfMessages     int
	VisibilityTimeout int
}
// getParameters serializes the non-zero fields into query-string values for
// the Get Messages request; zero-valued fields are omitted entirely.
func (p GetMessagesParameters) getParameters() url.Values {
	params := url.Values{}
	if p.NumOfMessages != 0 {
		params.Set("numofmessages", strconv.Itoa(p.NumOfMessages))
	}
	if p.VisibilityTimeout != 0 {
		params.Set("visibilitytimeout", strconv.Itoa(p.VisibilityTimeout))
	}
	return params
}
// PeekMessagesParameters is the set of options that can be specified for the
// Peek Message operation. A zero struct does not use any preferences for the
// request.
type PeekMessagesParameters struct {
	NumOfMessages int
}
// getParameters always carries peekonly=true (which selects the Peek variant
// of Get Messages) plus the optional message count when non-zero.
func (p PeekMessagesParameters) getParameters() url.Values {
	params := url.Values{"peekonly": {"true"}} // Required for peek operation
	if p.NumOfMessages != 0 {
		params.Set("numofmessages", strconv.Itoa(p.NumOfMessages))
	}
	return params
}
// GetMessagesResponse represents a response returned from Get Messages
// operation.
type GetMessagesResponse struct {
	XMLName           xml.Name             `xml:"QueueMessagesList"`
	QueueMessagesList []GetMessageResponse `xml:"QueueMessage"`
}
// GetMessageResponse represents a QueueMessage object returned from Get
// Messages operation response.
type GetMessageResponse struct {
	MessageID       string `xml:"MessageId"`
	InsertionTime   string `xml:"InsertionTime"`
	ExpirationTime  string `xml:"ExpirationTime"`
	PopReceipt      string `xml:"PopReceipt"`
	TimeNextVisible string `xml:"TimeNextVisible"`
	DequeueCount    int    `xml:"DequeueCount"`
	MessageText     string `xml:"MessageText"`
}
// PeekMessagesResponse represents a response returned from Peek Messages
// operation.
type PeekMessagesResponse struct {
	XMLName           xml.Name              `xml:"QueueMessagesList"`
	QueueMessagesList []PeekMessageResponse `xml:"QueueMessage"`
}
// PeekMessageResponse represents a QueueMessage object returned from Peek
// Messages operation response. Unlike GetMessageResponse it carries no
// PopReceipt or TimeNextVisible fields.
type PeekMessageResponse struct {
	MessageID      string `xml:"MessageId"`
	InsertionTime  string `xml:"InsertionTime"`
	ExpirationTime string `xml:"ExpirationTime"`
	DequeueCount   int    `xml:"DequeueCount"`
	MessageText    string `xml:"MessageText"`
}
// CreateQueue operation creates a queue under the given account.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179342.aspx
func (c QueueServiceClient) CreateQueue(name string) error {
	uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{})
	headers := c.client.getStandardHeaders()
	// The request has no body; send an explicit zero Content-Length.
	headers["Content-Length"] = "0"
	resp, err := c.client.exec("PUT", uri, headers, nil)
	if err != nil {
		return err
	}
	defer resp.body.Close()
	return checkRespCode(resp.statusCode, []int{http.StatusCreated})
}
// DeleteQueue operation permanently deletes the specified queue.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179436.aspx
func (c QueueServiceClient) DeleteQueue(name string) error {
	uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{})
	resp, err := c.client.exec("DELETE", uri, c.client.getStandardHeaders(), nil)
	if err != nil {
		return err
	}
	defer resp.body.Close()
	return checkRespCode(resp.statusCode, []int{http.StatusNoContent})
}
// QueueExists returns true if a queue with the given name exists. A 200
// response means it exists, a 404 means it does not; any other outcome is
// returned as an error.
func (c QueueServiceClient) QueueExists(name string) (bool, error) {
	uri := c.client.getEndpoint(queueServiceName, pathForQueue(name), url.Values{"comp": {"metadata"}})
	resp, err := c.client.exec("GET", uri, c.client.getStandardHeaders(), nil)
	if resp != nil {
		// Close the response body so the connection can be reused; the
		// previous version leaked it on every call.
		defer resp.body.Close()
		if resp.statusCode == http.StatusOK || resp.statusCode == http.StatusNotFound {
			return resp.statusCode == http.StatusOK, nil
		}
	}
	return false, err
}
// PutMessage operation adds a new message to the back of the message queue.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179346.aspx
func (c QueueServiceClient) PutMessage(queue string, message string, params PutMessageParameters) error {
	uri := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters())
	req := putMessageRequest{MessageText: message}
	// Marshal up front so the exact byte count is known for Content-Length.
	body, nn, err := xmlMarshal(req)
	if err != nil {
		return err
	}
	headers := c.client.getStandardHeaders()
	headers["Content-Length"] = strconv.Itoa(nn)
	resp, err := c.client.exec("POST", uri, headers, body)
	if err != nil {
		return err
	}
	defer resp.body.Close()
	return checkRespCode(resp.statusCode, []int{http.StatusCreated})
}
// ClearMessages operation deletes all messages from the specified queue.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179454.aspx
func (c QueueServiceClient) ClearMessages(queue string) error {
	endpoint := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), url.Values{})
	res, err := c.client.exec("DELETE", endpoint, c.client.getStandardHeaders(), nil)
	if err != nil {
		return err
	}
	defer res.body.Close()
	return checkRespCode(res.statusCode, []int{http.StatusNoContent})
}
// GetMessages operation retrieves one or more messages from the front of the
// queue.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179474.aspx
func (c QueueServiceClient) GetMessages(queue string, params GetMessagesParameters) (GetMessagesResponse, error) {
	var out GetMessagesResponse
	endpoint := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters())
	res, err := c.client.exec("GET", endpoint, c.client.getStandardHeaders(), nil)
	if err != nil {
		return out, err
	}
	defer res.body.Close()
	// Decode the XML message list straight from the response body.
	err = xmlUnmarshal(res.body, &out)
	return out, err
}
// PeekMessages retrieves one or more messages from the front of the queue, but
// does not alter the visibility of the message.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179472.aspx
func (c QueueServiceClient) PeekMessages(queue string, params PeekMessagesParameters) (PeekMessagesResponse, error) {
	var out PeekMessagesResponse
	endpoint := c.client.getEndpoint(queueServiceName, pathForQueueMessages(queue), params.getParameters())
	res, err := c.client.exec("GET", endpoint, c.client.getStandardHeaders(), nil)
	if err != nil {
		return out, err
	}
	defer res.body.Close()
	err = xmlUnmarshal(res.body, &out)
	return out, err
}
// DeleteMessage operation deletes the specified message.
//
// See https://msdn.microsoft.com/en-us/library/azure/dd179347.aspx
func (c QueueServiceClient) DeleteMessage(queue, messageID, popReceipt string) error {
	// The pop receipt obtained from Get Messages authorizes the delete.
	params := url.Values{"popreceipt": {popReceipt}}
	endpoint := c.client.getEndpoint(queueServiceName, pathForMessage(queue, messageID), params)
	res, err := c.client.exec("DELETE", endpoint, c.client.getStandardHeaders(), nil)
	if err != nil {
		return err
	}
	defer res.body.Close()
	return checkRespCode(res.statusCode, []int{http.StatusNoContent})
}

View file

@ -0,0 +1,91 @@
package storage
import (
chk "gopkg.in/check.v1"
)
// StorageQueueSuite groups the queue service tests for gocheck.
type StorageQueueSuite struct{}
// Register the suite with gocheck so its Test* methods run.
var _ = chk.Suite(&StorageQueueSuite{})
// getQueueClient returns a QueueServiceClient built from the shared basic
// test client.
func getQueueClient(c *chk.C) QueueServiceClient {
	return getBasicClient(c).GetQueueService()
}
// Test_pathForQueue checks the URL path generated for a queue resource.
func (s *StorageQueueSuite) Test_pathForQueue(c *chk.C) {
	c.Assert(pathForQueue("q"), chk.Equals, "/q")
}
// Test_pathForQueueMessages checks the URL path for a queue's messages.
func (s *StorageQueueSuite) Test_pathForQueueMessages(c *chk.C) {
	c.Assert(pathForQueueMessages("q"), chk.Equals, "/q/messages")
}
// Test_pathForMessage checks the URL path for a single queue message.
func (s *StorageQueueSuite) Test_pathForMessage(c *chk.C) {
	c.Assert(pathForMessage("q", "m"), chk.Equals, "/q/messages/m")
}
// TestCreateQueue_DeleteQueue creates a queue with a random name and then
// deletes it, expecting both round trips to succeed.
func (s *StorageQueueSuite) TestCreateQueue_DeleteQueue(c *chk.C) {
	cli := getQueueClient(c)
	name := randString(20)
	c.Assert(cli.CreateQueue(name), chk.IsNil)
	c.Assert(cli.DeleteQueue(name), chk.IsNil)
}
// TestQueueExists verifies QueueExists reports false for a missing queue and
// true once the queue has been created.
func (s *StorageQueueSuite) TestQueueExists(c *chk.C) {
	cli := getQueueClient(c)

	// A queue that was never created must not be reported as existing.
	exists, err := cli.QueueExists("nonexistent-queue")
	c.Assert(err, chk.IsNil)
	c.Assert(exists, chk.Equals, false)

	queueName := randString(20)
	c.Assert(cli.CreateQueue(queueName), chk.IsNil)
	defer cli.DeleteQueue(queueName)

	exists, err = cli.QueueExists(queueName)
	c.Assert(err, chk.IsNil)
	c.Assert(exists, chk.Equals, true)
}
// TestPostMessage_PeekMessage_DeleteMessage puts a maximum-length message on
// a fresh queue and verifies Peek returns the same text.
func (s *StorageQueueSuite) TestPostMessage_PeekMessage_DeleteMessage(c *chk.C) {
	q := randString(20)
	cli := getQueueClient(c)
	c.Assert(cli.CreateQueue(q), chk.IsNil)
	defer cli.DeleteQueue(q)
	msg := randString(64 * 1024) // exercise max length
	c.Assert(cli.PutMessage(q, msg, PutMessageParameters{}), chk.IsNil)
	r, err := cli.PeekMessages(q, PeekMessagesParameters{})
	c.Assert(err, chk.IsNil)
	c.Assert(len(r.QueueMessagesList), chk.Equals, 1)
	c.Assert(r.QueueMessagesList[0].MessageText, chk.Equals, msg)
}
// TestGetMessages puts several messages on a fresh queue and verifies a
// single GetMessages call returns all of them.
func (s *StorageQueueSuite) TestGetMessages(c *chk.C) {
	queueName := randString(20)
	cli := getQueueClient(c)
	c.Assert(cli.CreateQueue(queueName), chk.IsNil)
	defer cli.DeleteQueue(queueName)

	const count = 4
	for i := 0; i < count; i++ {
		c.Assert(cli.PutMessage(queueName, randString(10), PutMessageParameters{}), chk.IsNil)
	}

	resp, err := cli.GetMessages(queueName, GetMessagesParameters{NumOfMessages: count})
	c.Assert(err, chk.IsNil)
	c.Assert(len(resp.QueueMessagesList), chk.Equals, count)
}
// TestDeleteMessages dequeues a message and deletes it using the pop receipt
// returned by GetMessages.
func (s *StorageQueueSuite) TestDeleteMessages(c *chk.C) {
	q := randString(20)
	cli := getQueueClient(c)
	c.Assert(cli.CreateQueue(q), chk.IsNil)
	defer cli.DeleteQueue(q)
	c.Assert(cli.PutMessage(q, "message", PutMessageParameters{}), chk.IsNil)
	// VisibilityTimeout makes the message invisible to other consumers while
	// this test holds the pop receipt.
	r, err := cli.GetMessages(q, GetMessagesParameters{VisibilityTimeout: 1})
	c.Assert(err, chk.IsNil)
	c.Assert(len(r.QueueMessagesList), chk.Equals, 1)
	m := r.QueueMessagesList[0]
	c.Assert(cli.DeleteMessage(q, m.MessageID, m.PopReceipt), chk.IsNil)
}

View file

@ -1,6 +1,7 @@
package storage package storage
import ( import (
"bytes"
"crypto/hmac" "crypto/hmac"
"crypto/sha256" "crypto/sha256"
"encoding/base64" "encoding/base64"
@ -13,7 +14,7 @@ import (
"time" "time"
) )
func (c StorageClient) computeHmac256(message string) string { func (c Client) computeHmac256(message string) string {
h := hmac.New(sha256.New, c.accountKey) h := hmac.New(sha256.New, c.accountKey)
h.Write([]byte(message)) h.Write([]byte(message))
return base64.StdEncoding.EncodeToString(h.Sum(nil)) return base64.StdEncoding.EncodeToString(h.Sum(nil))
@ -47,17 +48,24 @@ func mergeParams(v1, v2 url.Values) url.Values {
func prepareBlockListRequest(blocks []Block) string { func prepareBlockListRequest(blocks []Block) string {
s := `<?xml version="1.0" encoding="utf-8"?><BlockList>` s := `<?xml version="1.0" encoding="utf-8"?><BlockList>`
for _, v := range blocks { for _, v := range blocks {
s += fmt.Sprintf("<%s>%s</%s>", v.Status, v.Id, v.Status) s += fmt.Sprintf("<%s>%s</%s>", v.Status, v.ID, v.Status)
} }
s += `</BlockList>` s += `</BlockList>`
return s return s
} }
func xmlUnmarshal(body io.ReadCloser, v interface{}) error { func xmlUnmarshal(body io.Reader, v interface{}) error {
data, err := ioutil.ReadAll(body) data, err := ioutil.ReadAll(body)
if err != nil { if err != nil {
return err return err
} }
defer body.Close()
return xml.Unmarshal(data, v) return xml.Unmarshal(data, v)
} }
func xmlMarshal(v interface{}) (io.Reader, int, error) {
b, err := xml.Marshal(v)
if err != nil {
return nil, 0, err
}
return bytes.NewReader(b), len(b), nil
}

View file

@ -0,0 +1,69 @@
package storage
import (
"encoding/xml"
"io/ioutil"
"net/url"
"strings"
"time"
chk "gopkg.in/check.v1"
)
// Test_timeRfc1123Formatted checks that timestamps are rendered in the
// RFC 1123 layout with a GMT suffix.
func (s *StorageClientSuite) Test_timeRfc1123Formatted(c *chk.C) {
	now := time.Now().UTC()
	expectedLayout := "Mon, 02 Jan 2006 15:04:05 GMT"
	c.Assert(timeRfc1123Formatted(now), chk.Equals, now.Format(expectedLayout))
}
// Test_mergeParams checks that mergeParams keeps values from both sets and
// that, for a shared key, the first set's value sorts first.
func (s *StorageClientSuite) Test_mergeParams(c *chk.C) {
	v1 := url.Values{
		"k1": {"v1"},
		"k2": {"v2"}}
	v2 := url.Values{
		"k1": {"v11"},
		"k3": {"v3"}}
	out := mergeParams(v1, v2)
	c.Assert(out.Get("k1"), chk.Equals, "v1")
	c.Assert(out.Get("k2"), chk.Equals, "v2")
	c.Assert(out.Get("k3"), chk.Equals, "v3")
	// Both values survive for the duplicated key.
	c.Assert(out["k1"], chk.DeepEquals, []string{"v1", "v11"})
}
// Test_prepareBlockListRequest checks the XML body built for Put Block List,
// both for an empty list and for blocks with differing statuses.
func (s *StorageClientSuite) Test_prepareBlockListRequest(c *chk.C) {
	empty := []Block{}
	expected := `<?xml version="1.0" encoding="utf-8"?><BlockList></BlockList>`
	c.Assert(prepareBlockListRequest(empty), chk.DeepEquals, expected)
	blocks := []Block{{"foo", BlockStatusLatest}, {"bar", BlockStatusUncommitted}}
	expected = `<?xml version="1.0" encoding="utf-8"?><BlockList><Latest>foo</Latest><Uncommitted>bar</Uncommitted></BlockList>`
	c.Assert(prepareBlockListRequest(blocks), chk.DeepEquals, expected)
}
// Test_xmlUnmarshal decodes a small XML document into a Blob and checks that
// the Name field round-trips.
func (s *StorageClientSuite) Test_xmlUnmarshal(c *chk.C) {
	// Named xmlBody (not xml) to avoid shadowing the encoding/xml package
	// imported by this file.
	xmlBody := `<?xml version="1.0" encoding="utf-8"?>
<Blob>
	<Name>myblob</Name>
</Blob>`
	var blob Blob
	body := ioutil.NopCloser(strings.NewReader(xmlBody))
	c.Assert(xmlUnmarshal(body, &blob), chk.IsNil)
	c.Assert(blob.Name, chk.Equals, "myblob")
}
// Test_xmlMarshal checks that xmlMarshal returns a reader over the encoded
// document and the exact byte length of that document.
func (s *StorageClientSuite) Test_xmlMarshal(c *chk.C) {
	type t struct {
		XMLName xml.Name `xml:"S"`
		Name    string   `xml:"Name"`
	}
	b := t{Name: "myblob"}
	expected := `<S><Name>myblob</Name></S>`
	r, i, err := xmlMarshal(b)
	c.Assert(err, chk.IsNil)
	o, err := ioutil.ReadAll(r)
	c.Assert(err, chk.IsNil)
	out := string(o)
	c.Assert(out, chk.Equals, expected)
	// The reported length must match the serialized output.
	c.Assert(i, chk.Equals, len(expected))
}

File diff suppressed because it is too large Load diff

View file

@ -1,203 +0,0 @@
package storage
import (
"encoding/base64"
"net/url"
"testing"
)
// TestGetBaseUrl_Basic_Https checks that a basic client uses the default API
// version and produces an https endpoint on the public cloud suffix.
func TestGetBaseUrl_Basic_Https(t *testing.T) {
	cli, err := NewBasicClient("foo", "YmFy")
	if err != nil {
		t.Fatal(err)
	}
	if cli.apiVersion != DefaultApiVersion {
		t.Fatalf("Wrong api version. Expected: '%s', got: '%s'", DefaultApiVersion, cli.apiVersion)
	}
	// NOTE: the original repeated the err != nil check here; the duplicate
	// was redundant and has been removed.
	output := cli.getBaseUrl("table")
	if expected := "https://foo.table.core.windows.net"; output != expected {
		t.Fatalf("Wrong base url. Expected: '%s', got: '%s'", expected, output)
	}
}
// TestGetBaseUrl_Custom_NoHttps checks a client built with a custom realm and
// https disabled produces an http endpoint on that realm.
func TestGetBaseUrl_Custom_NoHttps(t *testing.T) {
	apiVersion := DefaultApiVersion
	cli, err := NewClient("foo", "YmFy", "core.chinacloudapi.cn", apiVersion, false)
	if err != nil {
		t.Fatal(err)
	}
	if cli.apiVersion != apiVersion {
		t.Fatalf("Wrong api version. Expected: '%s', got: '%s'", apiVersion, cli.apiVersion)
	}
	output := cli.getBaseUrl("table")
	if expected := "http://foo.table.core.chinacloudapi.cn"; output != expected {
		t.Fatalf("Wrong base url. Expected: '%s', got: '%s'", expected, output)
	}
}
// TestGetEndpoint_None checks the endpoint URL with no path and no params.
func TestGetEndpoint_None(t *testing.T) {
	cli, err := NewBasicClient("foo", "YmFy")
	if err != nil {
		t.Fatal(err)
	}
	output := cli.getEndpoint(blobServiceName, "", url.Values{})
	if expected := "https://foo.blob.core.windows.net/"; output != expected {
		t.Fatalf("Wrong endpoint url. Expected: '%s', got: '%s'", expected, output)
	}
}
// TestGetEndpoint_PathOnly checks the endpoint URL with a path but no params.
func TestGetEndpoint_PathOnly(t *testing.T) {
	cli, err := NewBasicClient("foo", "YmFy")
	if err != nil {
		t.Fatal(err)
	}
	output := cli.getEndpoint(blobServiceName, "path", url.Values{})
	if expected := "https://foo.blob.core.windows.net/path"; output != expected {
		t.Fatalf("Wrong endpoint url. Expected: '%s', got: '%s'", expected, output)
	}
}
// TestGetEndpoint_ParamsOnly checks the endpoint URL with query params only.
func TestGetEndpoint_ParamsOnly(t *testing.T) {
	cli, err := NewBasicClient("foo", "YmFy")
	if err != nil {
		t.Fatal(err)
	}
	params := url.Values{}
	params.Set("a", "b")
	params.Set("c", "d")
	output := cli.getEndpoint(blobServiceName, "", params)
	if expected := "https://foo.blob.core.windows.net/?a=b&c=d"; output != expected {
		t.Fatalf("Wrong endpoint url. Expected: '%s', got: '%s'", expected, output)
	}
}
// TestGetEndpoint_Mixed checks the endpoint URL with both a path and params.
func TestGetEndpoint_Mixed(t *testing.T) {
	cli, err := NewBasicClient("foo", "YmFy")
	if err != nil {
		t.Fatal(err)
	}
	params := url.Values{}
	params.Set("a", "b")
	params.Set("c", "d")
	output := cli.getEndpoint(blobServiceName, "path", params)
	if expected := "https://foo.blob.core.windows.net/path?a=b&c=d"; output != expected {
		t.Fatalf("Wrong endpoint url. Expected: '%s', got: '%s'", expected, output)
	}
}
// Test_getStandardHeaders checks that exactly the version and date headers
// are produced, with the version matching the client's apiVersion.
func Test_getStandardHeaders(t *testing.T) {
	cli, err := NewBasicClient("foo", "YmFy")
	if err != nil {
		t.Fatal(err)
	}
	headers := cli.getStandardHeaders()
	if len(headers) != 2 {
		t.Fatal("Wrong standard header count")
	}
	if v, ok := headers["x-ms-version"]; !ok || v != cli.apiVersion {
		t.Fatal("Wrong version header")
	}
	if _, ok := headers["x-ms-date"]; !ok {
		t.Fatal("Missing date header")
	}
}
// Test_buildCanonicalizedResource runs table-driven cases mapping request
// URLs to their canonicalized resource strings used for request signing.
func Test_buildCanonicalizedResource(t *testing.T) {
	cli, err := NewBasicClient("foo", "YmFy")
	if err != nil {
		t.Fatal(err)
	}
	type test struct{ url, expected string }
	tests := []test{
		{"https://foo.blob.core.windows.net/path?a=b&c=d", "/foo/path\na:b\nc:d"},
		{"https://foo.blob.core.windows.net/?comp=list", "/foo/\ncomp:list"},
		{"https://foo.blob.core.windows.net/cnt/blob", "/foo/cnt/blob"},
	}
	for _, i := range tests {
		if out, err := cli.buildCanonicalizedResource(i.url); err != nil {
			t.Fatal(err)
		} else if out != i.expected {
			t.Fatalf("Wrong canonicalized resource. Expected:\n'%s', Got:\n'%s'", i.expected, out)
		}
	}
}
// Test_buildCanonicalizedHeader runs table-driven cases checking that only
// x-ms-* headers are included, sorted by header name.
func Test_buildCanonicalizedHeader(t *testing.T) {
	cli, err := NewBasicClient("foo", "YmFy")
	if err != nil {
		t.Fatal(err)
	}
	type test struct {
		headers  map[string]string
		expected string
	}
	tests := []test{
		{map[string]string{}, ""},
		{map[string]string{"x-ms-foo": "bar"}, "x-ms-foo:bar"},
		{map[string]string{"foo:": "bar"}, ""},
		{map[string]string{"foo:": "bar", "x-ms-foo": "bar"}, "x-ms-foo:bar"},
		{map[string]string{
			"x-ms-version":   "9999-99-99",
			"x-ms-blob-type": "BlockBlob"}, "x-ms-blob-type:BlockBlob\nx-ms-version:9999-99-99"}}
	for _, i := range tests {
		if out := cli.buildCanonicalizedHeader(i.headers); out != i.expected {
			t.Fatalf("Wrong canonicalized resource. Expected:\n'%s', Got:\n'%s'", i.expected, out)
		}
	}
}
// TestReturnsStorageServiceError deletes a nonexistent container and checks
// the returned error is a StorageServiceError carrying the service's status
// code, error code, and a non-empty request ID.
func TestReturnsStorageServiceError(t *testing.T) {
	cli, err := getBlobClient()
	if err != nil {
		t.Fatal(err)
	}
	// attempt to delete a nonexisting container
	_, err = cli.deleteContainer(randContainer())
	if err == nil {
		t.Fatal("Service has not returned an error")
	}
	if v, ok := err.(StorageServiceError); !ok {
		t.Fatal("Cannot assert to specific error")
	} else if v.StatusCode != 404 {
		t.Fatalf("Expected status:%d, got: %d", 404, v.StatusCode)
	} else if v.Code != "ContainerNotFound" {
		t.Fatalf("Expected code: %s, got: %s", "ContainerNotFound", v.Code)
	} else if v.RequestId == "" {
		t.Fatalf("RequestId does not exist")
	}
}
// Test_createAuthorizationHeader checks the SharedKey authorization header
// produced for a known canonicalized string and key.
func Test_createAuthorizationHeader(t *testing.T) {
	key := base64.StdEncoding.EncodeToString([]byte("bar"))
	cli, err := NewBasicClient("foo", key)
	if err != nil {
		t.Fatal(err)
	}
	canonicalizedString := `foobarzoo`
	expected := `SharedKey foo:h5U0ATVX6SpbFX1H6GNuxIMeXXCILLoIvhflPtuQZ30=`
	if out := cli.createAuthorizationHeader(canonicalizedString); out != expected {
		t.Fatalf("Wrong authorization header. Expected: '%s', Got:'%s'", expected, out)
	}
}

View file

@ -1,80 +0,0 @@
package storage
import (
"io/ioutil"
"net/url"
"reflect"
"strings"
"testing"
"time"
)
// Test_timeRfc1123Formatted checks timestamps are rendered in the RFC 1123
// layout with a GMT suffix.
func Test_timeRfc1123Formatted(t *testing.T) {
	now := time.Now().UTC()
	expectedLayout := "Mon, 02 Jan 2006 15:04:05 GMT"
	expected := now.Format(expectedLayout)
	if output := timeRfc1123Formatted(now); output != expected {
		t.Errorf("Expected: %s, got: %s", expected, output)
	}
}
// Test_mergeParams checks mergeParams keeps values from both sets and that a
// duplicated key retains both values, first set's value first.
func Test_mergeParams(t *testing.T) {
	v1 := url.Values{
		"k1": {"v1"},
		"k2": {"v2"}}
	v2 := url.Values{
		"k1": {"v11"},
		"k3": {"v3"}}
	out := mergeParams(v1, v2)
	if v := out.Get("k1"); v != "v1" {
		t.Errorf("Wrong value for k1: %s", v)
	}
	if v := out.Get("k2"); v != "v2" {
		t.Errorf("Wrong value for k2: %s", v)
	}
	if v := out.Get("k3"); v != "v3" {
		t.Errorf("Wrong value for k3: %s", v)
	}
	if v := out["k1"]; !reflect.DeepEqual(v, []string{"v1", "v11"}) {
		t.Errorf("Wrong multi-value for k1: %s", v)
	}
}
// Test_prepareBlockListRequest checks the XML body built for Put Block List,
// both for an empty list and for blocks with differing statuses.
func Test_prepareBlockListRequest(t *testing.T) {
	empty := []Block{}
	expected := `<?xml version="1.0" encoding="utf-8"?><BlockList></BlockList>`
	if out := prepareBlockListRequest(empty); expected != out {
		t.Errorf("Wrong block list. Expected: '%s', got: '%s'", expected, out)
	}
	blocks := []Block{{"foo", BlockStatusLatest}, {"bar", BlockStatusUncommitted}}
	expected = `<?xml version="1.0" encoding="utf-8"?><BlockList><Latest>foo</Latest><Uncommitted>bar</Uncommitted></BlockList>`
	if out := prepareBlockListRequest(blocks); expected != out {
		t.Errorf("Wrong block list. Expected: '%s', got: '%s'", expected, out)
	}
}
// Test_xmlUnmarshal decodes a small XML document into a Blob and checks the
// Name field round-trips.
func Test_xmlUnmarshal(t *testing.T) {
	// NOTE(review): local `xml` shadows the encoding/xml package within this
	// function; harmless here since the package is not referenced below.
	xml := `<?xml version="1.0" encoding="utf-8"?>
<Blob>
	<Name>myblob</Name>
</Blob>`
	body := ioutil.NopCloser(strings.NewReader(xml))
	var blob Blob
	err := xmlUnmarshal(body, &blob)
	if err != nil {
		t.Fatal(err)
	}
	if blob.Name != "myblob" {
		t.Fatal("Got wrong value")
	}
}

View file

@ -23,6 +23,7 @@ import (
_ "github.com/docker/distribution/registry/auth/token" _ "github.com/docker/distribution/registry/auth/token"
"github.com/docker/distribution/registry/handlers" "github.com/docker/distribution/registry/handlers"
"github.com/docker/distribution/registry/listener" "github.com/docker/distribution/registry/listener"
_ "github.com/docker/distribution/registry/storage/driver/azure"
_ "github.com/docker/distribution/registry/storage/driver/filesystem" _ "github.com/docker/distribution/registry/storage/driver/filesystem"
_ "github.com/docker/distribution/registry/storage/driver/inmemory" _ "github.com/docker/distribution/registry/storage/driver/inmemory"
_ "github.com/docker/distribution/registry/storage/driver/middleware/cloudfront" _ "github.com/docker/distribution/registry/storage/driver/middleware/cloudfront"

View file

@ -155,8 +155,3 @@ To enable the [Ceph RADOS storage driver](storage-drivers/rados.md)
```sh ```sh
export DOCKER_BUILDTAGS='include_rados' export DOCKER_BUILDTAGS='include_rados'
``` ```
To enable the [Azure storage driver](storage-drivers/azure.md), use the
`include_azure` build tag.

View file

@ -22,11 +22,5 @@ The following parameters must be used to authenticate and configure the storage
* `container`: Name of the root storage container in which all registry data will be stored. Must comply the storage container name [requirements][create-container-api]. * `container`: Name of the root storage container in which all registry data will be stored. Must comply the storage container name [requirements][create-container-api].
* `realm`: (optional) Domain name suffix for the Storage Service API endpoint. Defaults to `core.windows.net`. For example realm for "Azure in China" would be `core.chinacloudapi.cn` and realm for "Azure Government" would be `core.usgovcloudapi.net`. * `realm`: (optional) Domain name suffix for the Storage Service API endpoint. Defaults to `core.windows.net`. For example realm for "Azure in China" would be `core.chinacloudapi.cn` and realm for "Azure Government" would be `core.usgovcloudapi.net`.
## Developing
To include this driver when building Docker Distribution, use the build tag
`include_azure`. Please see the [building documentation][building] for details.
[azure-blob-storage]: http://azure.microsoft.com/en-us/services/storage/ [azure-blob-storage]: http://azure.microsoft.com/en-us/services/storage/
[create-container-api]: https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx [create-container-api]: https://msdn.microsoft.com/en-us/library/azure/dd179468.aspx
[building]: https://github.com/docker/distribution/blob/master/docs/building.md#optional-build-tags

View file

@ -16,7 +16,7 @@ import (
"github.com/docker/distribution/registry/storage/driver/base" "github.com/docker/distribution/registry/storage/driver/base"
"github.com/docker/distribution/registry/storage/driver/factory" "github.com/docker/distribution/registry/storage/driver/factory"
azure "github.com/MSOpenTech/azure-sdk-for-go/storage" azure "github.com/Azure/azure-sdk-for-go/storage"
) )
const driverName = "azure" const driverName = "azure"
@ -68,7 +68,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) {
realm, ok := parameters[paramRealm] realm, ok := parameters[paramRealm]
if !ok || fmt.Sprint(realm) == "" { if !ok || fmt.Sprint(realm) == "" {
realm = azure.DefaultBaseUrl realm = azure.DefaultBaseURL
} }
return New(fmt.Sprint(accountName), fmt.Sprint(accountKey), fmt.Sprint(container), fmt.Sprint(realm)) return New(fmt.Sprint(accountName), fmt.Sprint(accountKey), fmt.Sprint(container), fmt.Sprint(realm))
@ -76,7 +76,7 @@ func FromParameters(parameters map[string]interface{}) (*Driver, error) {
// New constructs a new Driver with the given Azure Storage Account credentials // New constructs a new Driver with the given Azure Storage Account credentials
func New(accountName, accountKey, container, realm string) (*Driver, error) { func New(accountName, accountKey, container, realm string) (*Driver, error) {
api, err := azure.NewClient(accountName, accountKey, realm, azure.DefaultApiVersion, true) api, err := azure.NewClient(accountName, accountKey, realm, azure.DefaultAPIVersion, true)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -89,7 +89,7 @@ func New(accountName, accountKey, container, realm string) (*Driver, error) {
} }
d := &driver{ d := &driver{
client: *blobClient, client: blobClient,
container: container} container: container}
return &Driver{baseEmbed: baseEmbed{Base: base.Base{StorageDriver: d}}}, nil return &Driver{baseEmbed: baseEmbed{Base: base.Base{StorageDriver: d}}}, nil
} }
@ -114,7 +114,16 @@ func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {
// PutContent stores the []byte content at a location designated by "path". // PutContent stores the []byte content at a location designated by "path".
func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error { func (d *driver) PutContent(ctx context.Context, path string, contents []byte) error {
return d.client.PutBlockBlob(d.container, path, ioutil.NopCloser(bytes.NewReader(contents))) if _, err := d.client.DeleteBlobIfExists(d.container, path); err != nil {
return err
}
if err := d.client.CreateBlockBlob(d.container, path); err != nil {
return err
}
bs := newAzureBlockStorage(d.client)
bw := newRandomBlobWriter(&bs, azure.MaxBlobBlockSize)
_, err := bw.WriteBlobAt(d.container, path, 0, bytes.NewReader(contents))
return err
} }
// ReadStream retrieves an io.ReadCloser for the content stored at "path" with a // ReadStream retrieves an io.ReadCloser for the content stored at "path" with a
@ -233,7 +242,7 @@ func (d *driver) List(ctx context.Context, path string) ([]string, error) {
// Move moves an object stored at sourcePath to destPath, removing the original // Move moves an object stored at sourcePath to destPath, removing the original
// object. // object.
func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error { func (d *driver) Move(ctx context.Context, sourcePath string, destPath string) error {
sourceBlobURL := d.client.GetBlobUrl(d.container, sourcePath) sourceBlobURL := d.client.GetBlobURL(d.container, sourcePath)
err := d.client.CopyBlob(d.container, destPath, sourceBlobURL) err := d.client.CopyBlob(d.container, destPath, sourceBlobURL)
if err != nil { if err != nil {
if is404(err) { if is404(err) {
@ -352,6 +361,6 @@ func (d *driver) listBlobs(container, virtPath string) ([]string, error) {
} }
func is404(err error) bool { func is404(err error) bool {
e, ok := err.(azure.StorageServiceError) e, ok := err.(azure.AzureStorageServiceError)
return ok && e.StatusCode == http.StatusNotFound return ok && e.StatusCode == http.StatusNotFound
} }

View file

@ -4,7 +4,7 @@ import (
"fmt" "fmt"
"io" "io"
azure "github.com/MSOpenTech/azure-sdk-for-go/storage" azure "github.com/Azure/azure-sdk-for-go/storage"
) )
// azureBlockStorage is adaptor between azure.BlobStorageClient and // azureBlockStorage is adaptor between azure.BlobStorageClient and

View file

@ -6,7 +6,7 @@ import (
"io" "io"
"io/ioutil" "io/ioutil"
azure "github.com/MSOpenTech/azure-sdk-for-go/storage" azure "github.com/Azure/azure-sdk-for-go/storage"
) )
type StorageSimulator struct { type StorageSimulator struct {
@ -122,12 +122,12 @@ func (s *StorageSimulator) PutBlockList(container, blob string, blocks []azure.B
var blockIDs []string var blockIDs []string
for _, v := range blocks { for _, v := range blocks {
bl, ok := bb.blocks[v.Id] bl, ok := bb.blocks[v.ID]
if !ok { // check if block ID exists if !ok { // check if block ID exists
return fmt.Errorf("Block id '%s' not found", v.Id) return fmt.Errorf("Block id '%s' not found", v.ID)
} }
bl.committed = true bl.committed = true
blockIDs = append(blockIDs, v.Id) blockIDs = append(blockIDs, v.ID)
} }
// Mark all other blocks uncommitted // Mark all other blocks uncommitted

View file

@ -7,7 +7,7 @@ import (
"sync" "sync"
"time" "time"
azure "github.com/MSOpenTech/azure-sdk-for-go/storage" azure "github.com/Azure/azure-sdk-for-go/storage"
) )
type blockIDGenerator struct { type blockIDGenerator struct {

View file

@ -4,7 +4,7 @@ import (
"math" "math"
"testing" "testing"
azure "github.com/MSOpenTech/azure-sdk-for-go/storage" azure "github.com/Azure/azure-sdk-for-go/storage"
) )
func Test_blockIdGenerator(t *testing.T) { func Test_blockIdGenerator(t *testing.T) {

View file

@ -5,7 +5,7 @@ import (
"io" "io"
"io/ioutil" "io/ioutil"
azure "github.com/MSOpenTech/azure-sdk-for-go/storage" azure "github.com/Azure/azure-sdk-for-go/storage"
) )
// blockStorage is the interface required from a block storage service // blockStorage is the interface required from a block storage service
@ -75,7 +75,7 @@ func (r *randomBlobWriter) WriteBlobAt(container, blob string, offset int64, chu
// Use existing block list // Use existing block list
var existingBlocks []azure.Block var existingBlocks []azure.Block
for _, v := range blocks.CommittedBlocks { for _, v := range blocks.CommittedBlocks {
existingBlocks = append(existingBlocks, azure.Block{Id: v.Name, Status: azure.BlockStatusCommitted}) existingBlocks = append(existingBlocks, azure.Block{ID: v.Name, Status: azure.BlockStatusCommitted})
} }
blockList = append(existingBlocks, blockList...) blockList = append(existingBlocks, blockList...)
} }
@ -111,7 +111,7 @@ func (r *randomBlobWriter) writeChunkToBlocks(container, blob string, chunk io.R
if err := r.bs.PutBlock(container, blob, blockID, data); err != nil { if err := r.bs.PutBlock(container, blob, blockID, data); err != nil {
return newBlocks, nn, err return newBlocks, nn, err
} }
newBlocks = append(newBlocks, azure.Block{Id: blockID, Status: azure.BlockStatusUncommitted}) newBlocks = append(newBlocks, azure.Block{ID: blockID, Status: azure.BlockStatusUncommitted})
} }
return newBlocks, nn, nil return newBlocks, nn, nil
} }
@ -131,7 +131,7 @@ func (r *randomBlobWriter) blocksLeftSide(container, blob string, writeOffset in
for _, v := range bx.CommittedBlocks { for _, v := range bx.CommittedBlocks {
blkSize := int64(v.Size) blkSize := int64(v.Size)
if o >= blkSize { // use existing block if o >= blkSize { // use existing block
left = append(left, azure.Block{Id: v.Name, Status: azure.BlockStatusCommitted}) left = append(left, azure.Block{ID: v.Name, Status: azure.BlockStatusCommitted})
o -= blkSize o -= blkSize
elapsed += blkSize elapsed += blkSize
} else if o > 0 { // current block needs to be splitted } else if o > 0 { // current block needs to be splitted
@ -150,7 +150,7 @@ func (r *randomBlobWriter) blocksLeftSide(container, blob string, writeOffset in
if err = r.bs.PutBlock(container, blob, newBlockID, data); err != nil { if err = r.bs.PutBlock(container, blob, newBlockID, data); err != nil {
return left, err return left, err
} }
left = append(left, azure.Block{Id: newBlockID, Status: azure.BlockStatusUncommitted}) left = append(left, azure.Block{ID: newBlockID, Status: azure.BlockStatusUncommitted})
break break
} }
} }
@ -177,7 +177,7 @@ func (r *randomBlobWriter) blocksRightSide(container, blob string, writeOffset i
) )
if bs > re { // take the block as is if bs > re { // take the block as is
right = append(right, azure.Block{Id: v.Name, Status: azure.BlockStatusCommitted}) right = append(right, azure.Block{ID: v.Name, Status: azure.BlockStatusCommitted})
} else if be > re { // current block needs to be splitted } else if be > re { // current block needs to be splitted
part, err := r.bs.GetSectionReader(container, blob, re+1, be-(re+1)+1) part, err := r.bs.GetSectionReader(container, blob, re+1, be-(re+1)+1)
if err != nil { if err != nil {
@ -192,7 +192,7 @@ func (r *randomBlobWriter) blocksRightSide(container, blob string, writeOffset i
if err = r.bs.PutBlock(container, blob, newBlockID, data); err != nil { if err = r.bs.PutBlock(container, blob, newBlockID, data); err != nil {
return right, err return right, err
} }
right = append(right, azure.Block{Id: newBlockID, Status: azure.BlockStatusUncommitted}) right = append(right, azure.Block{ID: newBlockID, Status: azure.BlockStatusUncommitted})
} }
elapsed += int64(v.Size) elapsed += int64(v.Size)
} }

View file

@ -9,7 +9,7 @@ import (
"strings" "strings"
"testing" "testing"
azure "github.com/MSOpenTech/azure-sdk-for-go/storage" azure "github.com/Azure/azure-sdk-for-go/storage"
) )
func TestRandomWriter_writeChunkToBlocks(t *testing.T) { func TestRandomWriter_writeChunkToBlocks(t *testing.T) {