*: switch from godep to glide

Signed-off-by: Antonio Murdaca <runcom@redhat.com>
Antonio Murdaca 2016-09-17 15:50:35 +02:00
parent 0d7b500cee
commit 4bc8701fc0
No known key found for this signature in database
GPG key ID: B2BEAD150DE936B9
673 changed files with 57012 additions and 46916 deletions

@@ -1,37 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof
# never checkin from the bin file (for now)
bin/*
# Test key files
*.pem
# Cover profiles
*.out
# Editor/IDE specific files.
*.sublime-project
*.sublime-workspace

@@ -1,18 +0,0 @@
Stephen J Day <stephen.day@docker.com> Stephen Day <stevvooe@users.noreply.github.com>
Stephen J Day <stephen.day@docker.com> Stephen Day <stevvooe@gmail.com>
Olivier Gambier <olivier@docker.com> Olivier Gambier <dmp42@users.noreply.github.com>
Brian Bland <brian.bland@docker.com> Brian Bland <r4nd0m1n4t0r@gmail.com>
Brian Bland <brian.bland@docker.com> Brian Bland <brian.t.bland@gmail.com>
Josh Hawn <josh.hawn@docker.com> Josh Hawn <jlhawn@berkeley.edu>
Richard Scothern <richard.scothern@docker.com> Richard <richard.scothern@gmail.com>
Richard Scothern <richard.scothern@docker.com> Richard Scothern <richard.scothern@gmail.com>
Andrew Meredith <andymeredith@gmail.com> Andrew Meredith <kendru@users.noreply.github.com>
harche <p.harshal@gmail.com> harche <harche@users.noreply.github.com>
Jessie Frazelle <jessie@docker.com> <jfrazelle@users.noreply.github.com>
Sharif Nassar <sharif@mrwacky.com> Sharif Nassar <mrwacky42@users.noreply.github.com>
Sven Dowideit <SvenDowideit@home.org.au> Sven Dowideit <SvenDowideit@users.noreply.github.com>
Vincent Giersch <vincent.giersch@ovh.net> Vincent Giersch <vincent@giersch.fr>
davidli <wenquan.li@hp.com> davidli <wenquan.li@hpe.com>
Omer Cohen <git@omer.io> Omer Cohen <git@omerc.net>
Eric Yang <windfarer@gmail.com> Eric Yang <Windfarer@users.noreply.github.com>
Nikita Tarasov <nikita@mygento.ru> Nikita <luckyraul@users.noreply.github.com>

@@ -1,147 +0,0 @@
Aaron Lehmann <aaron.lehmann@docker.com>
Aaron Schlesinger <aschlesinger@deis.com>
Aaron Vinson <avinson.public@gmail.com>
Adam Enger <adamenger@gmail.com>
Adrian Mouat <adrian.mouat@gmail.com>
Ahmet Alp Balkan <ahmetalpbalkan@gmail.com>
Alex Chan <alex.chan@metaswitch.com>
Alex Elman <aelman@indeed.com>
Alexey Gladkov <gladkov.alexey@gmail.com>
allencloud <allen.sun@daocloud.io>
amitshukla <ashukla73@hotmail.com>
Amy Lindburg <amy.lindburg@docker.com>
Andrew Hsu <andrewhsu@acm.org>
Andrew Meredith <andymeredith@gmail.com>
Andrew T Nguyen <andrew.nguyen@docker.com>
Andrey Kostov <kostov.andrey@gmail.com>
Andy Goldstein <agoldste@redhat.com>
Anis Elleuch <vadmeste@gmail.com>
Anton Tiurin <noxiouz@yandex.ru>
Antonio Mercado <amercado@thinknode.com>
Antonio Murdaca <runcom@redhat.com>
Arien Holthuizen <aholthuizen@schubergphilis.com>
Arnaud Porterie <arnaud.porterie@docker.com>
Arthur Baars <arthur@semmle.com>
Asuka Suzuki <hello@tanksuzuki.com>
Avi Miller <avi.miller@oracle.com>
Ayose Cazorla <ayosec@gmail.com>
BadZen <dave.trombley@gmail.com>
Ben Firshman <ben@firshman.co.uk>
bin liu <liubin0329@gmail.com>
Brian Bland <brian.bland@docker.com>
burnettk <burnettk@gmail.com>
Carson A <ca@carsonoid.net>
Chris Dillon <squarism@gmail.com>
cyli <cyli@twistedmatrix.com>
Daisuke Fujita <dtanshi45@gmail.com>
Daniel Huhn <daniel@danielhuhn.de>
Darren Shepherd <darren@rancher.com>
Dave Trombley <dave.trombley@gmail.com>
Dave Tucker <dt@docker.com>
David Lawrence <david.lawrence@docker.com>
David Verhasselt <david@crowdway.com>
David Xia <dxia@spotify.com>
davidli <wenquan.li@hp.com>
Dejan Golja <dejan@golja.org>
Derek McGowan <derek@mcgstyle.net>
Diogo Mónica <diogo.monica@gmail.com>
DJ Enriquez <dj.enriquez@infospace.com>
Donald Huang <don.hcd@gmail.com>
Doug Davis <dug@us.ibm.com>
Eric Yang <windfarer@gmail.com>
Fabio Huser <fabio@fh1.ch>
farmerworking <farmerworking@gmail.com>
Felix Yan <felixonmars@archlinux.org>
Florentin Raud <florentin.raud@gmail.com>
Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
gabriell nascimento <gabriell@bluesoft.com.br>
Gleb Schukin <gschukin@ptsecurity.com>
harche <p.harshal@gmail.com>
Henri Gomez <henri.gomez@gmail.com>
Hu Keping <hukeping@huawei.com>
Hua Wang <wanghua.humble@gmail.com>
HuKeping <hukeping@huawei.com>
Ian Babrou <ibobrik@gmail.com>
igayoso <igayoso@gmail.com>
Jack Griffin <jackpg14@gmail.com>
Jason Freidman <jason.freidman@gmail.com>
Jeff Nickoloff <jeff@allingeek.com>
Jessie Frazelle <jessie@docker.com>
jhaohai <jhaohai@foxmail.com>
Jianqing Wang <tsing@jianqing.org>
John Starks <jostarks@microsoft.com>
Jon Johnson <jonjohnson@google.com>
Jon Poler <jonathan.poler@apcera.com>
Jonathan Boulle <jonathanboulle@gmail.com>
Jordan Liggitt <jliggitt@redhat.com>
Josh Hawn <josh.hawn@docker.com>
Julien Fernandez <julien.fernandez@gmail.com>
Ke Xu <leonhartx.k@gmail.com>
Keerthan Mala <kmala@engineyard.com>
Kelsey Hightower <kelsey.hightower@gmail.com>
Kenneth Lim <kennethlimcp@gmail.com>
Kenny Leung <kleung@google.com>
Li Yi <denverdino@gmail.com>
Liu Hua <sdu.liu@huawei.com>
liuchang0812 <liuchang0812@gmail.com>
Louis Kottmann <louis.kottmann@gmail.com>
Luke Carpenter <x@rubynerd.net>
Mary Anthony <mary@docker.com>
Matt Bentley <mbentley@mbentley.net>
Matt Duch <matt@learnmetrics.com>
Matt Moore <mattmoor@google.com>
Matt Robenolt <matt@ydekproductions.com>
Michael Prokop <mika@grml.org>
Michal Minar <miminar@redhat.com>
Miquel Sabaté <msabate@suse.com>
Morgan Bauer <mbauer@us.ibm.com>
moxiegirl <mary@docker.com>
Nathan Sullivan <nathan@nightsys.net>
nevermosby <robolwq@qq.com>
Nghia Tran <tcnghia@gmail.com>
Nikita Tarasov <nikita@mygento.ru>
Nuutti Kotivuori <nuutti.kotivuori@poplatek.fi>
Oilbeater <liumengxinfly@gmail.com>
Olivier Gambier <olivier@docker.com>
Olivier Jacques <olivier.jacques@hp.com>
Omer Cohen <git@omer.io>
Patrick Devine <patrick.devine@docker.com>
Phil Estes <estesp@linux.vnet.ibm.com>
Philip Misiowiec <philip@atlashealth.com>
Richard Scothern <richard.scothern@docker.com>
Rodolfo Carvalho <rhcarvalho@gmail.com>
Rusty Conover <rusty@luckydinosaur.com>
Sean Boran <Boran@users.noreply.github.com>
Sebastiaan van Stijn <github@gone.nl>
Serge Dubrouski <sergeyfd@gmail.com>
Sharif Nassar <sharif@mrwacky.com>
Shawn Falkner-Horine <dreadpirateshawn@gmail.com>
Shreyas Karnik <karnik.shreyas@gmail.com>
Simon Thulbourn <simon+github@thulbourn.com>
Spencer Rinehart <anubis@overthemonkey.com>
Stefan Majewsky <stefan.majewsky@sap.com>
Stefan Weil <sw@weilnetz.de>
Stephen J Day <stephen.day@docker.com>
Sungho Moon <sungho.moon@navercorp.com>
Sven Dowideit <SvenDowideit@home.org.au>
Sylvain Baubeau <sbaubeau@redhat.com>
Ted Reed <ted.reed@gmail.com>
tgic <farmer1992@gmail.com>
Thomas Sjögren <konstruktoid@users.noreply.github.com>
Tianon Gravi <admwiggin@gmail.com>
Tibor Vass <teabee89@gmail.com>
Tonis Tiigi <tonistiigi@gmail.com>
Tony Holdstock-Brown <tony@docker.com>
Trevor Pounds <trevor.pounds@gmail.com>
Troels Thomsen <troels@thomsen.io>
Vincent Batts <vbatts@redhat.com>
Vincent Demeester <vincent@sbr.pm>
Vincent Giersch <vincent.giersch@ovh.net>
W. Trevor King <wking@tremily.us>
weiyuan.yl <weiyuan.yl@alibaba-inc.com>
xg.song <xg.song@venusource.com>
xiekeyang <xiekeyang@huawei.com>
Yann ROBERT <yann.robert@anantaplex.fr>
yuzou <zouyu7@huawei.com>
zhouhaibing089 <zhouhaibing089@gmail.com>
姜继忠 <jizhong.jiangjz@alibaba-inc.com>

@@ -1,119 +0,0 @@
# Building the registry source
## Use-case
This is useful if you intend to actively work on the registry.
### Alternatives
Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/).
People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`.
OS X users who want to run natively can do so following [the instructions here](osx-setup-guide.md).
### Gotchas
You are expected to know your way around go & git.
If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you.
## Build the development environment
The first prerequisite of properly building distribution targets is to have a Go
development environment setup. Please follow [How to Write Go Code](https://golang.org/doc/code.html)
for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the
environment.
If a Go development environment is set up, one can use `go get` to install the
`registry` command from the current latest:
go get github.com/docker/distribution/cmd/registry
The above will install the source repository into the `GOPATH`.
Now create the directory for the registry data (this might require you to set permissions properly)
mkdir -p /var/lib/registry
... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data in another location.
The `registry`
binary can then be run with the following:
$ $GOPATH/bin/registry --version
$GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown
> __NOTE:__ While you do not need to use `go get` to check out the distribution
> project, for these build instructions to work, the project must be checked
> out in the correct location in the `GOPATH`. This should almost always be
> `$GOPATH/src/github.com/docker/distribution`.
The registry can be run with the default config using the following
incantation:
$ $GOPATH/bin/registry serve $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml
INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
INFO[0000] debug server listening localhost:5001
If it is working, one should see the above log messages.
### Repeatable Builds
For the full development experience, one should `cd` into
`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go`
commands, such as `go test`, should work per package (please see
[Developing](#developing) if they don't work).
A `Makefile` has been provided as a convenience to support repeatable builds.
Please install the following into `GOPATH` for it to work:
go get github.com/tools/godep github.com/golang/lint/golint
**TODO(stevvooe):** Add a `make setup` command to Makefile to run this. Have to think about how to interact with Godeps properly.
Once these commands are available in the `GOPATH`, run `make` to get a full
build:
$ make
+ clean
+ fmt
+ vet
+ lint
+ build
github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar
github.com/Sirupsen/logrus
github.com/docker/libtrust
...
github.com/yvasiyarov/gorelic
github.com/docker/distribution/registry/handlers
github.com/docker/distribution/cmd/registry
+ test
...
ok github.com/docker/distribution/digest 7.875s
ok github.com/docker/distribution/manifest 0.028s
ok github.com/docker/distribution/notifications 17.322s
? github.com/docker/distribution/registry [no test files]
ok github.com/docker/distribution/registry/api/v2 0.101s
? github.com/docker/distribution/registry/auth [no test files]
ok github.com/docker/distribution/registry/auth/silly 0.011s
...
+ /Users/sday/go/src/github.com/docker/distribution/bin/registry
+ /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template
+ binaries
The above provides a repeatable build using the contents of the vendored
Godeps directory. This includes formatting, vetting, linting, building,
testing and generating tagged binaries. We can verify this worked by running
the registry binary generated in the "./bin" directory:
$ ./bin/registry -version
./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m
### Optional build tags
Optional [build tags](http://golang.org/pkg/go/build/) can be provided using
the environment variable `DOCKER_BUILDTAGS`.
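For example (a minimal sketch; the `include_foo` tag and the package name are hypothetical), a file guarded by a build constraint is only compiled when that tag appears in `DOCKER_BUILDTAGS`:

    // +build include_foo

    // This file participates in the build only when the "include_foo"
    // tag is set, e.g. DOCKER_BUILDTAGS="include_foo" make binaries.
    package storage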

@@ -1,35 +0,0 @@
# Changelog
## 2.5.0 (2016-06-14)
### Storage
- Ensure uploads directory is cleaned after upload is committed
- Add ability to cap concurrent operations in filesystem driver
- S3: Add 'us-gov-west-1' to the valid region list
- Swift: Handle ceph not returning Last-Modified header for HEAD requests
- Add redirect middleware
### Registry
- Add support for blobAccessController middleware
- Add support for layers from foreign sources
- Remove signature store
- Add support for Let's Encrypt
- Correct yaml key names in configuration
### Client
- Add option to get content digest from manifest get
### Spec
- Update the auth spec scope grammar to reflect the fact that hostnames are optionally supported
- Clarify API documentation around catalog fetch behavior
### API
- Support returning HTTP 429 (Too Many Requests)
### Documentation
- Update auth documentation examples to show "expires in" as int
### Docker Image
- Use Alpine Linux as base image

@@ -1,140 +0,0 @@
# Contributing to the registry
## Before reporting an issue...
### If your problem is with...
- automated builds
- your account on the [Docker Hub](https://hub.docker.com/)
- any other [Docker Hub](https://hub.docker.com/) issue
Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com)
### If you...
- need help setting up your registry
- can't figure out something
- are not sure what's going on or what your problem is
Then please do not open an issue here yet - you should first try one of the following support forums:
- irc: #docker-distribution on freenode
- mailing-list: <distribution@dockerproject.org> or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
## Reporting an issue properly
By following these simple rules you will get better and faster feedback on your issue.
- search the bugtracker for an already reported issue
### If you found an issue that describes your problem:
- please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments
- please refrain from adding "same thing here" or "+1" comments
- you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button
- comment if you have some new, technical and relevant information to add to the case
- __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue.
### If you have not found an existing issue that describes your problem:
1. create a new issue, with a succinct title that describes your issue:
- bad title: "It doesn't work with my docker"
- good title: "Private registry push fail: 400 error with E_INVALID_DIGEST"
2. copy the output of:
- `docker version`
- `docker info`
- `docker exec <registry-container> registry -version`
3. copy the command line you used to launch your Registry
4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments)
5. reproduce your problem and get your docker daemon logs showing the error
6. if relevant, copy your registry logs that show the error
7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used)
8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry
## Contributing a patch for a known bug, or a small correction
You should follow the basic GitHub workflow:
1. fork
2. commit a change
3. make sure the tests pass
4. PR
Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). It's very simple:
- configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com`
- sign your commits using `-s`: `git commit -s -m "My commit"`
Some simple rules to ensure quick merge:
- clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`)
- prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once
- if you need to amend your PR following comments, please squash instead of adding more commits
## Contributing new features
You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve.
If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning.
If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work.
Then you should submit your implementation, clearly linking to the issue (and possible proposal).
Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged.
It's mandatory to:
- interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines)
- address maintainers' comments and modify your submission accordingly
- write tests for any new code
Complying with these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry.
Have a look at a great, successful contribution: the [Swift driver PR](https://github.com/docker/distribution/pull/493)
## Coding Style
Unless explicitly stated, we follow all coding guidelines from the Go
community. While some of these standards may seem arbitrary, they somehow seem
to result in a solid, consistent codebase.
It is possible that the code base does not currently comply with these
guidelines. We are not looking for a massive PR that fixes this, since that
goes against the spirit of the guidelines. All new contributions should make a
best effort to clean up and make the code base better than they left it.
Obviously, apply your best judgement. Remember, the goal here is to make the
code base easier for humans to navigate and understand. Always keep that in
mind when nudging others to comply.
The rules:
1. All code should be formatted with `gofmt -s`.
2. All code should pass the default levels of
[`golint`](https://github.com/golang/lint).
3. All code should follow the guidelines covered in [Effective
Go](http://golang.org/doc/effective_go.html) and [Go Code Review
Comments](https://github.com/golang/go/wiki/CodeReviewComments).
4. Comment the code. Tell us the why, the history and the context.
5. Document _all_ declarations and methods, even private ones. Declare
expectations, caveats and anything else that may be important. If a type
gets exported, having the comments already there will ensure it's ready.
6. Variable name length should be proportional to its context and no longer.
`noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`.
In practice, short methods will have short variable names and globals will
have longer names.
7. No underscores in package names. If you need a compound name, step back,
and re-examine why you need a compound name. If you still think you need a
compound name, lose the underscore.
8. No utils or helpers packages. If a function is not general enough to
warrant its own package, it has not been written generally enough to be a
part of a util package. Just leave it unexported and well-documented.
9. All tests should run with `go test` and outside tooling should not be
required. No, we don't need another unit testing framework. Assertion
packages are acceptable if they provide _real_ incremental value.
10. Even though we call these "rules" above, they are actually just
guidelines. Since you've read all the rules, you now know that.
If you are having trouble getting into the mood of idiomatic Go, we recommend
reading through [Effective Go](http://golang.org/doc/effective_go.html). The
[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the
kool-aid is a lot easier than going thirsty.
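To make rules 4-6 concrete, here is a minimal sketch (not taken from the code base):

    // maxRetries bounds upload retry attempts. It is documented even
    // though it is private (rule 5), and the comment records the why
    // (rule 4): enough attempts to ride out transient storage errors
    // without masking real outages.
    const maxRetries = 3

    // retry runs fn up to maxRetries times, returning the first nil
    // error or the last error seen. Short scope, short names (rule 6).
    func retry(fn func() error) (err error) {
    	for i := 0; i < maxRetries; i++ {
    		if err = fn(); err == nil {
    			return nil
    		}
    	}
    	return err
    }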

@@ -1,18 +0,0 @@
FROM golang:1.6-alpine
ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution
ENV DOCKER_BUILDTAGS include_oss include_gcs
WORKDIR $DISTRIBUTION_DIR
COPY . $DISTRIBUTION_DIR
COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml
RUN set -ex \
&& apk add --no-cache make git
RUN make PREFIX=/go clean binaries
VOLUME ["/var/lib/registry"]
EXPOSE 5000
ENTRYPOINT ["registry"]
CMD ["serve", "/etc/docker/registry/config.yml"]

@@ -199,4 +199,3 @@ Apache License
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@@ -1,58 +0,0 @@
# Distribution maintainers file
#
# This file describes who runs the docker/distribution project and how.
# This is a living document - if you see something out of date or missing, speak up!
#
# It is structured to be consumable by both humans and programs.
# To extract its contents programmatically, use any TOML-compliant parser.
#
# This file is compiled into the MAINTAINERS file in docker/opensource.
#
[Org]
[Org."Core maintainers"]
people = [
"aaronlehmann",
"dmcgowan",
"dmp42",
"richardscothern",
"shykes",
"stevvooe",
]
[people]
# A reference list of all people associated with the project.
# All other sections should refer to people by their canonical key
# in the people section.
# ADD YOURSELF HERE IN ALPHABETICAL ORDER
[people.aaronlehmann]
Name = "Aaron Lehmann"
Email = "aaron.lehmann@docker.com"
GitHub = "aaronlehmann"
[people.dmcgowan]
Name = "Derek McGowan"
Email = "derek@mcgstyle.net"
GitHub = "dmcgowan"
[people.dmp42]
Name = "Olivier Gambier"
Email = "olivier@docker.com"
GitHub = "dmp42"
[people.richardscothern]
Name = "Richard Scothern"
Email = "richard.scothern@gmail.com"
GitHub = "richardscothern"
[people.shykes]
Name = "Solomon Hykes"
Email = "solomon@docker.com"
GitHub = "shykes"
[people.stevvooe]
Name = "Stephen Day"
Email = "stephen.day@docker.com"
GitHub = "stevvooe"
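As the header notes, the file is consumable by programs. A minimal Go sketch, assuming the third-party github.com/BurntSushi/toml parser:

    package main

    import (
    	"fmt"

    	"github.com/BurntSushi/toml"
    )

    type person struct {
    	Name   string
    	Email  string
    	GitHub string
    }

    // maintainers maps the [people] table; unknown sections such as
    // [Org] are simply left undecoded.
    type maintainers struct {
    	People map[string]person `toml:"people"`
    }

    func main() {
    	var m maintainers
    	if _, err := toml.DecodeFile("MAINTAINERS", &m); err != nil {
    		panic(err)
    	}
    	for key, p := range m.People {
    		fmt.Printf("%s: %s <%s> (@%s)\n", key, p.Name, p.Email, p.GitHub)
    	}
    }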

@@ -1,106 +0,0 @@
# Set an output prefix, which is the local directory if not specified
PREFIX?=$(shell pwd)
# Used to populate version variable in main package.
VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always)
# Allow turning off function inlining and variable registerization
ifeq (${DISABLE_OPTIMIZATION},true)
GO_GCFLAGS=-gcflags "-N -l"
VERSION:="$(VERSION)-noopt"
endif
GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)"
.PHONY: clean all fmt vet lint build test binaries
.DEFAULT: all
all: fmt vet lint build test binaries
AUTHORS: .mailmap .git/HEAD
git log --format='%aN <%aE>' | sort -fu > $@
# This only needs to be generated by hand when cutting full releases.
version/version.go:
./version/version.sh > $@
# Required for go 1.5 to build
GO15VENDOREXPERIMENT := 1
# Package list
PKGS := $(shell go list -tags "${DOCKER_BUILDTAGS}" ./... | grep -v ^github.com/docker/distribution/vendor/)
# Resolving binary dependencies for specific targets
GOLINT := $(shell which golint || echo '')
GODEP := $(shell which godep || echo '')
${PREFIX}/bin/registry: $(wildcard **/*.go)
@echo "+ $@"
@go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry
${PREFIX}/bin/digest: $(wildcard **/*.go)
@echo "+ $@"
@go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/digest
${PREFIX}/bin/registry-api-descriptor-template: $(wildcard **/*.go)
@echo "+ $@"
@go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry-api-descriptor-template
docs/spec/api.md: docs/spec/api.md.tmpl ${PREFIX}/bin/registry-api-descriptor-template
./bin/registry-api-descriptor-template $< > $@
vet:
@echo "+ $@"
@go vet -tags "${DOCKER_BUILDTAGS}" $(PKGS)
fmt:
@echo "+ $@"
@test -z "$$(gofmt -s -l . 2>&1 | grep -v ^vendor/ | tee /dev/stderr)" || \
(echo >&2 "+ please format Go code with 'gofmt -s'" && false)
lint:
@echo "+ $@"
$(if $(GOLINT), , \
$(error Please install golint: `go get -u github.com/golang/lint/golint`))
@test -z "$$($(GOLINT) ./... 2>&1 | grep -v ^vendor/ | tee /dev/stderr)"
build:
@echo "+ $@"
@go build -tags "${DOCKER_BUILDTAGS}" -v ${GO_LDFLAGS} $(PKGS)
test:
@echo "+ $@"
@go test -test.short -tags "${DOCKER_BUILDTAGS}" $(PKGS)
test-full:
@echo "+ $@"
@go test -tags "${DOCKER_BUILDTAGS}" $(PKGS)
binaries: ${PREFIX}/bin/registry ${PREFIX}/bin/digest ${PREFIX}/bin/registry-api-descriptor-template
@echo "+ $@"
clean:
@echo "+ $@"
@rm -rf "${PREFIX}/bin/registry" "${PREFIX}/bin/digest" "${PREFIX}/bin/registry-api-descriptor-template"
dep-save:
@echo "+ $@"
$(if $(GODEP), , \
$(error Please install godep: go get github.com/tools/godep))
@$(GODEP) save $(PKGS)
dep-restore:
@echo "+ $@"
$(if $(GODEP), , \
$(error Please install godep: go get github.com/tools/godep))
@$(GODEP) restore -v
dep-validate: dep-restore
@echo "+ $@"
@rm -Rf .vendor.bak
@mv vendor .vendor.bak
@rm -Rf Godeps
@$(GODEP) save ./...
@test -z "$$(diff -r vendor .vendor.bak 2>&1 | tee /dev/stderr)" || \
(echo >&2 "+ borked dependencies! what you have in Godeps/Godeps.json does not match with what you have in vendor" && false)
@rm -Rf .vendor.bak
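The `-X` flag in `GO_LDFLAGS` above overwrites a string variable in the `version` package at link time. A sketch of the shape of the generated `version/version.go` (illustrative; the real file is produced by `version/version.sh`):

    package version

    // Version is overwritten at link time via
    //   go build -ldflags "-X <pkg>/version.Version=$(VERSION)"
    // so this default only appears in builds made without the Makefile.
    var Version = "v0.0.0+unknown"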

@@ -1,131 +0,0 @@
# Distribution
The Docker toolset to pack, ship, store, and deliver content.
This repository's main product is the Docker Registry 2.0 implementation
for storing and distributing Docker images. It supersedes the
[docker/docker-registry](https://github.com/docker/docker-registry)
project with a new API design, focused around security and performance.
<img src="https://www.docker.com/sites/default/files/oyster-registry-3.png" width=200px/>
[![Circle CI](https://circleci.com/gh/docker/distribution/tree/master.svg?style=svg)](https://circleci.com/gh/docker/distribution/tree/master)
[![GoDoc](https://godoc.org/github.com/docker/distribution?status.svg)](https://godoc.org/github.com/docker/distribution)
This repository contains the following components:
|**Component** |Description |
|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. |
| **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. |
| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) |
| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/index.md) related just to the registry. |
### How does this integrate with Docker engine?
This project should provide an implementation of a V2 API for use in the [Docker
core project](https://github.com/docker/docker). The API should be embeddable
and simplify the process of securely pulling and pushing content from `docker`
daemons.
### What are the long term goals of the Distribution project?
The _Distribution_ project has the further long term goal of providing a
secure tool chain for distributing content. The specifications, APIs and tools
should be as useful with Docker as they are without.
Our goal is to design a professional grade and extensible content distribution
system that allows users to:
* Enjoy an efficient, secured and reliable way to store, manage, package and
exchange content
* Hack/roll their own on top of healthy open-source components
* Implement their own home-made solution through good specs, and a solid
extension mechanism.
## More about Registry 2.0
The new registry implementation provides the following benefits:
- faster push and pull
- new, more efficient implementation
- simplified deployment
- pluggable storage backend
- webhook notifications
For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md).
### Who needs to deploy a registry?
By default, Docker users pull images from Docker's public registry instance.
[Installing Docker](https://docs.docker.com/engine/installation/) gives users this
ability. Users can also push images to a repository on Docker's public registry,
if they have a [Docker Hub](https://hub.docker.com/) account.
For some users and even companies, this default behavior is sufficient. For
others, it is not.
For example, users with their own software products may want to maintain a
registry for private, company images. Also, you may wish to deploy your own
image repository for images used in testing or continuous integration. For these
use cases and others, [deploying your own registry instance](docs/deploying.md)
may be the better choice.
### Migration to Registry 2.0
For those who have previously deployed their own registry based on the Registry
1.0 implementation and wish to deploy a Registry 2.0 while retaining images,
data migration is required. A tool to assist with migration efforts has been
created. For more information see [docker/migrator](https://github.com/docker/migrator).
## Contribute
Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute
issues, fixes, and patches to this project. If you are contributing code, see
the instructions for [building a development environment](docs/recipes/building.md).
## Support
If any issues are encountered while using the _Distribution_ project, several
avenues are available for support:
<table>
<tr>
<th align="left">
IRC
</th>
<td>
#docker-distribution on FreeNode
</td>
</tr>
<tr>
<th align="left">
Issue Tracker
</th>
<td>
github.com/docker/distribution/issues
</td>
</tr>
<tr>
<th align="left">
Google Groups
</th>
<td>
https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
</td>
</tr>
<tr>
<th align="left">
Mailing List
</th>
<td>
docker@dockerproject.org
</td>
</tr>
</table>
## License
This project is distributed under [Apache License, Version 2.0](LICENSE).

@@ -1,267 +0,0 @@
# Roadmap
The Distribution Project consists of several components, some of which are
still being defined. This document defines the high-level goals of the
project, identifies the current components, and defines the release
relationship to the Docker Platform.
* [Distribution Goals](#distribution-goals)
* [Distribution Components](#distribution-components)
* [Project Planning](#project-planning): release relationship to the Docker Platform.
This road map is a living document, providing an overview of the goals and
considerations made with respect to the future of the project.
## Distribution Goals
- Replace the existing [docker registry](https://github.com/docker/docker-registry)
implementation as the primary implementation.
- Replace the existing push and pull code in the docker engine with the
distribution package.
- Define a strong data model for distributing docker images
- Provide a flexible distribution tool kit for use in the docker platform
- Unlock new distribution models
## Distribution Components
Components of the Distribution Project are managed via github [milestones](https://github.com/docker/distribution/milestones). Upcoming
features and bugfixes for a component will be added to the relevant milestone. If a feature or
bugfix is not part of a milestone, it is currently unscheduled for
implementation.
* [Registry](#registry)
* [Distribution Package](#distribution-package)
***
### Registry
The new Docker registry is the main portion of the distribution repository.
Registry 2.0 is the first release of the next-generation registry. This was
primarily focused on implementing the [new registry
API](https://github.com/docker/distribution/blob/master/docs/spec/api.md),
with a focus on security and performance.
Following from the Distribution project goals above, we have a set of goals
for registry v2 that we would like to follow in the design. New features
should be compared against these goals.
#### Data Storage and Distribution First
The registry's first goal is to provide a reliable, consistent storage
location for Docker images. The registry should only provide the minimal
amount of indexing required to fetch image data and no more.
This means we should be selective in new features and API additions, including
those that may require expensive, ever-growing indexes. Requests should be
servable in "constant time".
#### Content Addressability
All data objects used in the registry API should be content addressable.
Content identifiers should be secure and verifiable. This provides a secure,
reliable base from which to build more advanced content distribution systems.
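A minimal sketch of the idea (illustrative, not registry code): a blob's identifier is a digest of its bytes, so any holder of the content can recompute and verify the address.

    package main

    import (
    	"crypto/sha256"
    	"fmt"
    )

    func main() {
    	blob := []byte("layer contents")
    	// The same bytes always yield the same identifier; tampered
    	// bytes fail verification against the advertised digest.
    	fmt.Printf("sha256:%x\n", sha256.Sum256(blob))
    }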
#### Content Agnostic
In the past, changes to the image format would require large changes in Docker
and the Registry. By decoupling the distribution and image format, we can
allow the formats to progress without having to coordinate between the two.
This means that we should be focused on decoupling Docker from the registry
just as much as decoupling the registry from Docker. Such an approach will
allow us to unlock new distribution models that haven't been possible before.
We can take this further by saying that the new registry should be content
agnostic. The registry provides a model of names, tags, manifests and content
addresses and that model can be used to work with content.
#### Simplicity
The new registry should be closer to a microservice component than its
predecessor. This means it should have a narrower API and a low number of
service dependencies. It should be easy to deploy.
This means that other solutions should be explored before changing the API or
adding extra dependencies. If functionality is required, can it be added as an
extension or companion service?
#### Extensibility
The registry should provide extension points to add functionality. The aim is
to keep the scope narrow while still providing the ability to add functionality.
Features like search, indexing, synchronization and registry explorers fall
into this category. No such feature should be added unless we've found it
impossible to do through an extension.
#### Active Feature Discussions
The following are feature discussions that are currently active.
If you don't see your favorite, unimplemented feature, feel free to contact us
via IRC or the mailing list and we can talk about adding it. The goal here is
to make sure that new features go through a rigid design process before
landing in the registry.
##### Proxying to other Registries
A _pull-through caching_ mode exists for the registry, but is restricted from
within the docker client to only mirror the official Docker Hub. This functionality
can be expanded when image provenance has been specified and implemented in the
distribution project.
##### Metadata storage
Metadata for the registry is currently stored with the manifest and layer data on
the storage backend. While this is a big win for simplicity and reliably maintaining
state, it comes at the cost of consistency and high latency. The mutable registry
metadata operations should be abstracted behind an API which will allow ACID compliant
storage systems to handle metadata.
##### Peer to Peer transfer
Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit
##### Indexing, Search and Discovery
The original registry provided some implementation of search for use with
private registries. Support has been elided from V2 since we'd like to
decouple search functionality from the registry. This makes the registry
simpler to deploy, especially in use cases where search is not needed, and
lets us decouple the image format from the registry.
There are explorations into using the catalog API and notification system to
build external indexes. The current line of thought is that we will define a
common search API to index and query docker images. Such a system could be run
as a companion to a registry or set of registries to power discovery.
The main issue with search and discovery is that there are so many ways to
accomplish it. There are two aspects to this project. The first is deciding on
how it will be done, including an API definition that can work with changing
data formats. The second is the process of integrating with `docker search`.
We expect that someone will attempt to address the problem with the existing
tools and propose it as a standard search API, or use the experience to inform
a standardization process. Once this has been explored, we will integrate with
the docker client.
Please see the following for more detail:
- https://github.com/docker/distribution/issues/206
##### Deletes
> __NOTE:__ Deletes are a much-requested feature. Before requesting this
feature or participating in discussion, we ask that you read this section in
full and understand the problems behind deletes.
While, at first glance, implementing deletes seems simple, there are a number
of mitigating factors that make many solutions not ideal or even pathological
in the context of a registry. The following paragraphs discuss the background and
approaches that could be applied to arrive at a solution.
The goal of deletes in any system is to remove unused or unneeded data. Only
data requested for deletion should be removed and no other data. Removing
unintended data is worse than _not_ removing data that was requested for
removal but, ideally, both are supported. Generally, according to this rule, we
err on the side of holding data longer than needed, ensuring that it is only
removed when
we can be certain that it can be removed. With the current behavior, we opt to
hold onto the data forever, ensuring that data cannot be incorrectly removed.
To understand the problems with implementing deletes, one must understand the
data model. All registry data is stored in a filesystem layout, implemented on
a "storage driver", effectively a _virtual file system_ (VFS). The storage
system must assume that this VFS layer will be eventually consistent and has
poor read-after-write consistency, since this is the lowest common denominator
among the storage drivers. This is mitigated by writing values in
reverse-dependent order, but it makes wider transactional operations unsafe.
Layered on the VFS model is a content-addressable _directed, acyclic graph_
(DAG) made up of blobs. Manifests reference layers. Tags reference manifests.
Since the same data can be referenced by multiple manifests, we only store
data once, even if it is in different repositories. Thus, we have a set of
blobs, referenced by tags and manifests. If we want to delete a blob we need
to be certain that it is no longer referenced by another manifest or tag. When
we delete a manifest, we can also try to delete the referenced blobs. Deciding
whether or not a blob has an active reference is the crux of the problem.
Conceptually, deleting a manifest and its resources is quite simple. Just find
all the manifests, enumerate the referenced blobs and delete the blobs not in
that set. An astute observer will recognize this as a garbage collection
problem. As with garbage collection in programming languages, this is very
simple when one always has a consistent view. When one adds parallelism and an
inconsistent view of data, it becomes very challenging.
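A sketch of that naive mark-and-sweep, assuming simplified `Digest` and `Manifest` types (illustrative only, not the registry's implementation):

    type Digest string

    // Manifest is a simplified stand-in for a registry manifest.
    type Manifest interface {
    	References() []Digest
    }

    // markSweep deletes blobs unreferenced by any manifest. Without a
    // consistent view, a manifest accepted between the mark and sweep
    // phases may reference a blob this function is about to delete.
    func markSweep(manifests []Manifest, blobs []Digest, deleteBlob func(Digest) error) error {
    	marked := make(map[Digest]struct{}) // mark: everything reachable
    	for _, m := range manifests {
    		for _, d := range m.References() {
    			marked[d] = struct{}{}
    		}
    	}
    	for _, d := range blobs { // sweep: delete the unreferenced rest
    		if _, ok := marked[d]; !ok {
    			if err := deleteBlob(d); err != nil {
    				return err
    			}
    		}
    	}
    	return nil
    }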
A simple example can demonstrate this. Let's say we are deleting a manifest
_A_ in one process. We scan the manifest and decide that all the blobs are
ready for deletion. Concurrently, we have another process accepting a new
manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_
is accepted and all the blobs are considered present, so the operation
proceeds. The original process then deletes the referenced blobs, assuming
they were unreferenced. The manifest _B_, which we thought had all of its data
present, can no longer be served by the registry, since the dependent data has
been deleted.
Deleting data from the registry safely requires some way to coordinate this
operation. The following approaches are being considered:
- _Reference Counting_ - Maintain a count of references to each blob. This is
challenging for a number of reasons: 1. maintaining a consistent consensus
of reference counts across a set of Registries and 2. building the initial
list of reference counts for an existing registry. These challenges can be
met with a consensus protocol like Paxos or Raft in the first case and a
necessary but simple scan in the second.
- _Lock the World GC_ - Halt all writes to the data store. Walk the data store
and find all blob references. Delete all unreferenced blobs. This approach
is very simple but requires disabling writes for a period of time while the
service reads all data. This is slow and expensive but very accurate and
effective.
- _Generational GC_ - Do something similar to above but instead of blocking
writes, writes are sent to another storage backend while reads are broadcast
to the new and old backends. GC is then performed on the read-only portion.
Because writes land in the new backend, the data in the read-only section
can be safely deleted. The main drawbacks of this approach are complexity
and coordination.
- _Centralized Oracle_ - Using a centralized, transactional database, we can
know exactly which data is referenced at any given time. This avoids
the coordination problem by managing this data in a single location. We trade
off metadata scalability for simplicity and performance. This is a very good
option for most registry deployments. This would create a bottleneck for
registry metadata. However, metadata is generally not the main bottleneck
when serving images.
Please let us know if other solutions exist that we have yet to enumerate.
Note that for any approach, implementation is a massive consideration. For
example, a mark-sweep based solution may seem simple, but the amount of
coordination work may offset the extra work it would take to build a _Centralized
Oracle_. We'll accept proposals for any solution but please coordinate with us
before dropping code.
At this time, we have traded off simplicity and ease of deployment for disk
space. Simplicity and ease of deployment tend to reduce developer involvement,
which is currently the most expensive resource in software engineering. Taking
on any solution for deletes will greatly affect these factors, trading off
very cheap disk space for a complex deployment and operational story.
Please see the following issues for more detail:
- https://github.com/docker/distribution/issues/422
- https://github.com/docker/distribution/issues/461
- https://github.com/docker/distribution/issues/462
### Distribution Package
At its core, the Distribution Project is a set of Go packages that make up
Distribution Components. At this time, most of these packages make up the
Registry implementation.
The package itself is considered unstable. If you're using it, please take care to vendor the dependent version.
For feature additions, please see the Registry section. In the future, we may break out a
separate Roadmap for distribution-specific features that apply to more than
just the registry.
***
### Project Planning
An [Open-Source Planning Process](https://github.com/docker/distribution/wiki/Open-Source-Planning-Process) is used to define the Roadmap. [Project Pages](https://github.com/docker/distribution/wiki) define the goals for each Milestone and identify current progress.

@@ -69,9 +69,6 @@ type Descriptor struct {
// against this digest.
Digest digest.Digest `json:"digest,omitempty"`
// URLs contains the source URLs of this content.
URLs []string `json:"urls,omitempty"`
// NOTE: Before adding a field here, please ensure that all
// other options have been exhausted. Much of the type relationships
// depend on the simplicity of this type.
@@ -127,11 +124,6 @@ type BlobDescriptorService interface {
Clear(ctx context.Context, dgst digest.Digest) error
}
// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService.
type BlobDescriptorServiceFactory interface {
BlobAccessController(svc BlobDescriptorService) BlobDescriptorService
}
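// An illustrative sketch (not part of this diff): the simplest factory,
// whose middleware just delegates. A real implementation would wrap svc
// with access checks or alternate metadata storage.
//
//	type passthroughFactory struct{}
//
//	func (passthroughFactory) BlobAccessController(svc BlobDescriptorService) BlobDescriptorService {
//		return svc
//	}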
// ReadSeekCloser is the primary reader type for blob data, combining
// io.ReadSeeker with io.Closer.
type ReadSeekCloser interface {

@@ -1,89 +0,0 @@
# Pony-up!
machine:
pre:
# Install gvm
- bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/1.0.22/binscripts/gvm-installer)
# Install codecov for coverage
- pip install --user codecov
post:
# go
- gvm install go1.6 --prefer-binary --name=stable
environment:
# Convenient shortcuts to "common" locations
CHECKOUT: /home/ubuntu/$CIRCLE_PROJECT_REPONAME
BASE_DIR: src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME
# Trick circle brainflat "no absolute path" behavior
BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR
DOCKER_BUILDTAGS: "include_oss include_gcs"
# Workaround Circle parsing dumb bugs and/or YAML wonkyness
CIRCLE_PAIN: "mode: set"
hosts:
# Not used yet
fancy: 127.0.0.1
dependencies:
pre:
# Copy the code to the gopath of all go versions
- >
gvm use stable &&
mkdir -p "$(dirname $BASE_STABLE)" &&
cp -R "$CHECKOUT" "$BASE_STABLE"
override:
# Install dependencies for every copied clone/go version
- gvm use stable && go get github.com/tools/godep:
pwd: $BASE_STABLE
post:
# For the stable go version, additionally install linting tools
- >
gvm use stable &&
go get github.com/axw/gocov/gocov github.com/golang/lint/golint
test:
pre:
# Output the go versions we are going to test
# - gvm use old && go version
- gvm use stable && go version
# Ensure validation of dependencies
- gvm use stable && if test -n "`git diff --stat=1000 master | grep -Ei \"vendor|godeps\"`"; then make dep-validate; fi:
pwd: $BASE_STABLE
# First thing: build everything. This will catch compile errors, and it's
# also necessary for go vet to work properly (see #807).
- gvm use stable && godep go install $(go list ./... | grep -v "/vendor/"):
pwd: $BASE_STABLE
# FMT
- gvm use stable && make fmt:
pwd: $BASE_STABLE
# VET
- gvm use stable && make vet:
pwd: $BASE_STABLE
# LINT
- gvm use stable && make lint:
pwd: $BASE_STABLE
override:
# Test stable, and report
- gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; godep go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE':
timeout: 600
pwd: $BASE_STABLE
post:
# Report to codecov
- bash <(curl -s https://codecov.io/bash):
pwd: $BASE_STABLE
## Notes
# Disabled the -race detector due to massive memory usage.
# Do we want these as well?
# - go get code.google.com/p/go.tools/cmd/goimports
# - test -z "$(goimports -l -w ./... | tee /dev/stderr)"
# http://labix.org/gocheck

@@ -1,85 +0,0 @@
package context
import (
"sync"
"github.com/docker/distribution/uuid"
"golang.org/x/net/context"
)
// Context is a copy of Context from the golang.org/x/net/context package.
type Context interface {
context.Context
}
// instanceContext is a context that provides only an instance id. It is
// provided as the main background context.
type instanceContext struct {
Context
id string // id of context, logged as "instance.id"
once sync.Once // once protects generation of the id
}
func (ic *instanceContext) Value(key interface{}) interface{} {
if key == "instance.id" {
ic.once.Do(func() {
// We want to lazy initialize the UUID such that we don't
// call a random generator from the package initialization
// code. For various reasons, random may not be available
// https://github.com/docker/distribution/issues/782
ic.id = uuid.Generate().String()
})
return ic.id
}
return ic.Context.Value(key)
}
var background = &instanceContext{
Context: context.Background(),
}
// Background returns a non-nil, empty Context. The background context
// provides a single key, "instance.id" that is globally unique to the
// process.
func Background() Context {
return background
}
// WithValue returns a copy of parent in which the value associated with key is
// val. Use context Values only for request-scoped data that transits processes
// and APIs, not for passing optional parameters to functions.
func WithValue(parent Context, key, val interface{}) Context {
return context.WithValue(parent, key, val)
}
// stringMapContext is a simple context implementation that checks a map for a
// key, falling back to a parent if not present.
type stringMapContext struct {
context.Context
m map[string]interface{}
}
// WithValues returns a context that proxies lookups through a map. Only
// supports string keys.
func WithValues(ctx context.Context, m map[string]interface{}) context.Context {
mo := make(map[string]interface{}, len(m)) // make our own copy.
for k, v := range m {
mo[k] = v
}
return stringMapContext{
Context: ctx,
m: mo,
}
}
func (smc stringMapContext) Value(key interface{}) interface{} {
if ks, ok := key.(string); ok {
if v, ok := smc.m[ks]; ok {
return v
}
}
return smc.Context.Value(key)
}
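// Usage sketch (illustrative, not part of this file):
//
//	ctx := Background()
//	id := ctx.Value("instance.id").(string) // stable for the process lifetime
//	ctx = WithValues(ctx, map[string]interface{}{"app.name": "registry"})
//	_, _ = id, ctx.Value("app.name") // yields "registry"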

@@ -1,89 +0,0 @@
// Package context provides several utilities for working with
// golang.org/x/net/context in http requests. Primarily, the focus is on
// logging relevant request information but this package is not limited to
// that purpose.
//
// The easiest way to get started is to get the background context:
//
// ctx := context.Background()
//
// The returned context should be passed around your application and be the
// root of all other context instances. If the application has a version, this
// line should be called before anything else:
//
// ctx := context.WithVersion(context.Background(), version)
//
// The above will store the version in the context and will be available to
// the logger.
//
// Logging
//
// The most useful aspect of this package is GetLogger. This function takes
// any context.Context interface and returns the current logger from the
// context. Canonical usage looks like this:
//
// GetLogger(ctx).Infof("something interesting happened")
//
// GetLogger also takes optional key arguments. The keys will be looked up in
// the context and reported with the logger. The following example would
// return a logger that prints the version with each log message:
//
// ctx := context.WithValue(context.Background(), "version", version)
// GetLogger(ctx, "version").Infof("this log message has a version field")
//
// The above would print out a log message like this:
//
// INFO[0000] this log message has a version field version=v2.0.0-alpha.2.m
//
// When used with WithLogger, we gain the ability to decorate the context with
// loggers that have information from disparate parts of the call stack.
// Following from the version example, we can build a new context with the
// configured logger such that we always print the version field:
//
// ctx = WithLogger(ctx, GetLogger(ctx, "version"))
//
// Since the logger has been pushed to the context, we can now get the version
// field for free with our log messages. Future calls to GetLogger on the new
// context will have the version field:
//
// GetLogger(ctx).Infof("this log message has a version field")
//
// This becomes more powerful when we start stacking loggers. Let's say we
// have the version logger from above but also want a request id. Using the
// context above, in our request scoped function, we place another logger in
// the context:
//
// ctx = context.WithValue(ctx, "http.request.id", "unique id") // called when building request context
// ctx = WithLogger(ctx, GetLogger(ctx, "http.request.id"))
//
// When GetLogger is called on the new context, "http.request.id" will be
// included as a logger field, along with the original "version" field:
//
// INFO[0000] this log message has a version field http.request.id=unique id version=v2.0.0-alpha.2.m
//
// Note that this only affects the new context; the previous context, with the
// version field, can be used independently. Put another way, the new logger,
// added to the request context, is unique to that context and can have
// request scoped variables.
//
// HTTP Requests
//
// This package also contains several methods for working with http requests.
// The concepts are very similar to those described above. We simply place the
// request in the context using WithRequest. This makes the request variables
// available. GetRequestLogger can then be called to get request specific
// variables in a log line:
//
// ctx = WithRequest(ctx, req)
// GetRequestLogger(ctx).Infof("request variables")
//
// Like above, if we want to include the request data in all log messages in
// the context, we push the logger to a new context and use that one:
//
// ctx = WithLogger(ctx, GetRequestLogger(ctx))
//
// The concept is fairly powerful and ensures that calls throughout the stack
// can be traced in log messages. Using the fields like "http.request.id", one
// can analyze call flow for a particular request with a simple grep of the
// logs.
package context

@@ -1,366 +0,0 @@
package context
import (
"errors"
"net"
"net/http"
"strings"
"sync"
"time"
log "github.com/Sirupsen/logrus"
"github.com/docker/distribution/uuid"
"github.com/gorilla/mux"
)
// Common errors used with this package.
var (
ErrNoRequestContext = errors.New("no http request in context")
ErrNoResponseWriterContext = errors.New("no http response in context")
)
func parseIP(ipStr string) net.IP {
ip := net.ParseIP(ipStr)
if ip == nil {
log.Warnf("invalid remote IP address: %q", ipStr)
}
return ip
}
// RemoteAddr extracts the remote address of the request, taking into
// account proxy headers.
func RemoteAddr(r *http.Request) string {
if prior := r.Header.Get("X-Forwarded-For"); prior != "" {
proxies := strings.Split(prior, ",")
if len(proxies) > 0 {
remoteAddr := strings.Trim(proxies[0], " ")
if parseIP(remoteAddr) != nil {
return remoteAddr
}
}
}
// X-Real-Ip is less supported, but worth checking in the
// absence of X-Forwarded-For
if realIP := r.Header.Get("X-Real-Ip"); realIP != "" {
if parseIP(realIP) != nil {
return realIP
}
}
return r.RemoteAddr
}
// RemoteIP extracts the remote IP of the request, taking into
// account proxy headers.
func RemoteIP(r *http.Request) string {
addr := RemoteAddr(r)
// Try parsing it as "IP:port"
if ip, _, err := net.SplitHostPort(addr); err == nil {
return ip
}
return addr
}
// WithRequest places the request on the context. The context of the request
// is assigned a unique id, available at "http.request.id". The request itself
// is available at "http.request". Other common attributes are available under
// the prefix "http.request.". If a request is already present on the context,
// this method will panic.
func WithRequest(ctx Context, r *http.Request) Context {
if ctx.Value("http.request") != nil {
// NOTE(stevvooe): This needs to be considered a programming error. It
// is unlikely that we'd want to have more than one request in
// context.
panic("only one request per context")
}
return &httpRequestContext{
Context: ctx,
startedAt: time.Now(),
id: uuid.Generate().String(),
r: r,
}
}
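// Usage sketch (illustrative, not part of this file): a handler builds
// the request-scoped context once, then logs with the generated id.
//
//	func handler(w http.ResponseWriter, r *http.Request) {
//		ctx := WithRequest(Background(), r)
//		GetRequestLogger(ctx).Infof("handling request %s", GetRequestID(ctx))
//	}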
// GetRequest returns the http request in the given context. Returns
// ErrNoRequestContext if the context does not have an http request associated
// with it.
func GetRequest(ctx Context) (*http.Request, error) {
if r, ok := ctx.Value("http.request").(*http.Request); r != nil && ok {
return r, nil
}
return nil, ErrNoRequestContext
}
// GetRequestID attempts to resolve the current request id, if possible. An
// empty value is returned if it is not available on the context.
func GetRequestID(ctx Context) string {
return GetStringValue(ctx, "http.request.id")
}
// WithResponseWriter returns a new context and response writer that makes
// interesting response statistics available within the context.
func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) {
if closeNotifier, ok := w.(http.CloseNotifier); ok {
irwCN := &instrumentedResponseWriterCN{
instrumentedResponseWriter: instrumentedResponseWriter{
ResponseWriter: w,
Context: ctx,
},
CloseNotifier: closeNotifier,
}
return irwCN, irwCN
}
irw := instrumentedResponseWriter{
ResponseWriter: w,
Context: ctx,
}
return &irw, &irw
}
// GetResponseWriter returns the http.ResponseWriter from the provided
// context. If not present, ErrNoResponseWriterContext is returned. The
// returned instance provides instrumentation in the context.
func GetResponseWriter(ctx Context) (http.ResponseWriter, error) {
v := ctx.Value("http.response")
rw, ok := v.(http.ResponseWriter)
if !ok || rw == nil {
return nil, ErrNoResponseWriterContext
}
return rw, nil
}
// getVarsFromRequest lets us change the request vars implementation for testing
// and maybe future changes.
var getVarsFromRequest = mux.Vars
// WithVars extracts gorilla/mux vars and makes them available on the returned
// context. Variables are available at keys with the prefix "vars.". For
// example, if looking for the variable "name", it can be accessed as
// "vars.name". Implementations that are accessing values need not know that
// the underlying context is implemented with gorilla/mux vars.
func WithVars(ctx Context, r *http.Request) Context {
return &muxVarsContext{
Context: ctx,
vars: getVarsFromRequest(r),
}
}
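// An illustrative sketch with a gorilla/mux route (the route and variable
// name are assumptions for the example):
//
//	r := mux.NewRouter()
//	r.HandleFunc("/repos/{name}", func(w http.ResponseWriter, req *http.Request) {
//		ctx := WithVars(Background(), req)
//		name, _ := ctx.Value("vars.name").(string)
//		fmt.Fprintln(w, "repo:", name)
//	})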
// GetRequestLogger returns a logger that contains fields from the request in
// the current context. If the request is not available in the context, no
// fields will display. Request loggers can safely be pushed onto the context.
func GetRequestLogger(ctx Context) Logger {
return GetLogger(ctx,
"http.request.id",
"http.request.method",
"http.request.host",
"http.request.uri",
"http.request.referer",
"http.request.useragent",
"http.request.remoteaddr",
"http.request.contenttype")
}
// GetResponseLogger reads the current response stats and builds a logger.
// Because the values are read at call time, pushing a logger returned from
// this function on the context will lead to missing or invalid data. Only
// call this at the end of a request, after the response has been written.
func GetResponseLogger(ctx Context) Logger {
l := getLogrusLogger(ctx,
"http.response.written",
"http.response.status",
"http.response.contenttype")
duration := Since(ctx, "http.request.startedat")
if duration > 0 {
l = l.WithField("http.response.duration", duration.String())
}
return l
}
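// A middleware-shaped sketch of that ordering (the wrapper itself is
// illustrative): instrument first, let the handler write, then log.
//
//	func logged(h http.Handler) http.Handler {
//		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
//			ctx, iw := WithResponseWriter(WithRequest(Background(), r), w)
//			h.ServeHTTP(iw, r)
//			GetResponseLogger(ctx).Infof("request served")
//		})
//	}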
// httpRequestContext makes information about a request available to context.
type httpRequestContext struct {
Context
startedAt time.Time
id string
r *http.Request
}
// Value returns a keyed element of the request for use in the context. To get
// the request itself, query "http.request". For other components, access them
// as "http.request.<component>"; for example, "http.request.uri" maps to
// r.RequestURI.
func (ctx *httpRequestContext) Value(key interface{}) interface{} {
if keyStr, ok := key.(string); ok {
if keyStr == "http.request" {
return ctx.r
}
if !strings.HasPrefix(keyStr, "http.request.") {
goto fallback
}
parts := strings.Split(keyStr, ".")
if len(parts) != 3 {
goto fallback
}
switch parts[2] {
case "uri":
return ctx.r.RequestURI
case "remoteaddr":
return RemoteAddr(ctx.r)
case "method":
return ctx.r.Method
case "host":
return ctx.r.Host
case "referer":
referer := ctx.r.Referer()
if referer != "" {
return referer
}
case "useragent":
return ctx.r.UserAgent()
case "id":
return ctx.id
case "startedat":
return ctx.startedAt
case "contenttype":
ct := ctx.r.Header.Get("Content-Type")
if ct != "" {
return ct
}
}
}
fallback:
return ctx.Context.Value(key)
}
type muxVarsContext struct {
Context
vars map[string]string
}
func (ctx *muxVarsContext) Value(key interface{}) interface{} {
if keyStr, ok := key.(string); ok {
if keyStr == "vars" {
return ctx.vars
}
if strings.HasPrefix(keyStr, "vars.") {
keyStr = strings.TrimPrefix(keyStr, "vars.")
}
if v, ok := ctx.vars[keyStr]; ok {
return v
}
}
return ctx.Context.Value(key)
}
// instrumentedResponseWriterCN provides response writer information in a
// context. It implements http.CloseNotifier so that users can detect
// early disconnects.
type instrumentedResponseWriterCN struct {
instrumentedResponseWriter
http.CloseNotifier
}
// instrumentedResponseWriter provides response writer information in a
// context. This variant is only used in the case where CloseNotifier is not
// implemented by the parent ResponseWriter.
type instrumentedResponseWriter struct {
http.ResponseWriter
Context
mu sync.Mutex
status int
written int64
}
func (irw *instrumentedResponseWriter) Write(p []byte) (n int, err error) {
n, err = irw.ResponseWriter.Write(p)
irw.mu.Lock()
irw.written += int64(n)
// Guess the likely status if not set.
if irw.status == 0 {
irw.status = http.StatusOK
}
irw.mu.Unlock()
return
}
func (irw *instrumentedResponseWriter) WriteHeader(status int) {
irw.ResponseWriter.WriteHeader(status)
irw.mu.Lock()
irw.status = status
irw.mu.Unlock()
}
func (irw *instrumentedResponseWriter) Flush() {
if flusher, ok := irw.ResponseWriter.(http.Flusher); ok {
flusher.Flush()
}
}
func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} {
if keyStr, ok := key.(string); ok {
if keyStr == "http.response" {
return irw
}
if !strings.HasPrefix(keyStr, "http.response.") {
goto fallback
}
parts := strings.Split(keyStr, ".")
if len(parts) != 3 {
goto fallback
}
irw.mu.Lock()
defer irw.mu.Unlock()
switch parts[2] {
case "written":
return irw.written
case "status":
return irw.status
case "contenttype":
contentType := irw.Header().Get("Content-Type")
if contentType != "" {
return contentType
}
}
}
fallback:
return irw.Context.Value(key)
}
func (irw *instrumentedResponseWriterCN) Value(key interface{}) interface{} {
if keyStr, ok := key.(string); ok {
if keyStr == "http.response" {
return irw
}
}
return irw.instrumentedResponseWriter.Value(key)
}

View file

@ -1,116 +0,0 @@
package context
import (
"fmt"
"github.com/Sirupsen/logrus"
"runtime"
)
// Logger provides a leveled-logging interface.
type Logger interface {
// standard logger methods
Print(args ...interface{})
Printf(format string, args ...interface{})
Println(args ...interface{})
Fatal(args ...interface{})
Fatalf(format string, args ...interface{})
Fatalln(args ...interface{})
Panic(args ...interface{})
Panicf(format string, args ...interface{})
Panicln(args ...interface{})
// Leveled methods, from logrus
Debug(args ...interface{})
Debugf(format string, args ...interface{})
Debugln(args ...interface{})
Error(args ...interface{})
Errorf(format string, args ...interface{})
Errorln(args ...interface{})
Info(args ...interface{})
Infof(format string, args ...interface{})
Infoln(args ...interface{})
Warn(args ...interface{})
Warnf(format string, args ...interface{})
Warnln(args ...interface{})
}
// WithLogger creates a new context with provided logger.
func WithLogger(ctx Context, logger Logger) Context {
return WithValue(ctx, "logger", logger)
}
// GetLoggerWithField returns a logger instance with the specified field key
// and value without affecting the context. Extra specified keys will be
// resolved from the context.
func GetLoggerWithField(ctx Context, key, value interface{}, keys ...interface{}) Logger {
return getLogrusLogger(ctx, keys...).WithField(fmt.Sprint(key), value)
}
// GetLoggerWithFields returns a logger instance with the specified fields
// without affecting the context. Extra specified keys will be resolved from
// the context.
func GetLoggerWithFields(ctx Context, fields map[interface{}]interface{}, keys ...interface{}) Logger {
// must convert from map[interface{}]interface{} to map[string]interface{} for logrus.Fields.
lfields := make(logrus.Fields, len(fields))
for key, value := range fields {
lfields[fmt.Sprint(key)] = value
}
return getLogrusLogger(ctx, keys...).WithFields(lfields)
}
// GetLogger returns the logger from the current context, if present. If one
// or more keys are provided, they will be resolved on the context and
// included in the logger. While context.Value takes an interface, any key
// argument passed to GetLogger will be passed to fmt.Sprint when expanded as
// a logging key field. If context keys are integer constants, for example,
// it's recommended that a String method be implemented.
func GetLogger(ctx Context, keys ...interface{}) Logger {
return getLogrusLogger(ctx, keys...)
}
// getLogrusLogger returns the logrus logger for the context. If one or more keys
// are provided, they will be resolved on the context and included in the
// logger. Only use this function if specific logrus functionality is
// required.
func getLogrusLogger(ctx Context, keys ...interface{}) *logrus.Entry {
var logger *logrus.Entry
// Get a logger, if it is present.
loggerInterface := ctx.Value("logger")
if loggerInterface != nil {
if lgr, ok := loggerInterface.(*logrus.Entry); ok {
logger = lgr
}
}
if logger == nil {
fields := logrus.Fields{}
// Fill in the instance id, if we have it.
instanceID := ctx.Value("instance.id")
if instanceID != nil {
fields["instance.id"] = instanceID
}
fields["go.version"] = runtime.Version()
// If no logger is found, just return the standard logger.
logger = logrus.StandardLogger().WithFields(fields)
}
fields := logrus.Fields{}
for _, key := range keys {
v := ctx.Value(key)
if v != nil {
fields[fmt.Sprint(key)] = v
}
}
return logger.WithFields(fields)
}
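// A usage sketch (the field names are illustrative): push a derived logger
// onto the context once, then retrieve it anywhere below.
//
//	ctx := WithValue(Background(), "instance.id", "abc123")
//	ctx = WithLogger(ctx, GetLoggerWithField(ctx, "component", "registry"))
//	GetLogger(ctx).Infof("logger configured")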

View file

@ -1,104 +0,0 @@
package context
import (
"runtime"
"time"
"github.com/docker/distribution/uuid"
)
// WithTrace allocates a traced timing span in a new context. This allows a
// caller to track the time between calling WithTrace and the returned done
// function. When the done function is called, a log message is emitted with a
// "trace.duration" field, corresponding to the elapsed time and a
// "trace.func" field, corresponding to the function that called WithTrace.
//
// The logging keys "trace.id" and "trace.parent.id" are provided to implement
// dapper-like tracing. This function should be complemented with a WithSpan
// method that could be used for tracing distributed RPC calls.
//
// The main benefit of this function is to post-process log messages or
// intercept them in a hook to provide timing data. Trace ids and parent ids
// can also be linked to provide call tracing, if so required.
//
// Here is an example of the usage:
//
// func timedOperation(ctx Context) {
// ctx, done := WithTrace(ctx)
// defer done("this will be the log message")
// // ... function body ...
// }
//
// If the function ran for roughly 1s, such a usage would emit a log message
// as follows:
//
// INFO[0001] this will be the log message trace.duration=1.004575763s trace.func=github.com/docker/distribution/context.timedOperation trace.id=<id> ...
//
// Notice that the function name is automatically resolved along with the
// package, and a trace id is emitted that can be linked with parent ids.
func WithTrace(ctx Context) (Context, func(format string, a ...interface{})) {
if ctx == nil {
ctx = Background()
}
pc, file, line, _ := runtime.Caller(1)
f := runtime.FuncForPC(pc)
ctx = &traced{
Context: ctx,
id: uuid.Generate().String(),
start: time.Now(),
parent: GetStringValue(ctx, "trace.id"),
fnname: f.Name(),
file: file,
line: line,
}
return ctx, func(format string, a ...interface{}) {
GetLogger(ctx,
"trace.duration",
"trace.id",
"trace.parent.id",
"trace.func",
"trace.file",
"trace.line").
Debugf(format, a...)
}
}
// traced represents a context that is traced for function call timing. It
// also provides fast lookup for the various attributes that are available on
// the trace.
type traced struct {
Context
id string
parent string
start time.Time
fnname string
file string
line int
}
func (ts *traced) Value(key interface{}) interface{} {
switch key {
case "trace.start":
return ts.start
case "trace.duration":
return time.Since(ts.start)
case "trace.id":
return ts.id
case "trace.parent.id":
if ts.parent == "" {
return nil // must return nil to signal no parent.
}
return ts.parent
case "trace.func":
return ts.fnname
case "trace.file":
return ts.file
case "trace.line":
return ts.line
}
return ts.Context.Value(key)
}
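// A sketch of span nesting (the function names are illustrative): the inner
// span's "trace.parent.id" resolves to the outer span's "trace.id".
//
//	func outer(ctx Context) {
//		ctx, done := WithTrace(ctx)
//		defer done("outer complete")
//		inner(ctx)
//	}
//
//	func inner(ctx Context) {
//		_, done := WithTrace(ctx)
//		defer done("inner complete")
//	}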

View file

@ -1,24 +0,0 @@
package context
import (
"time"
)
// Since looks up key, which should be a time.Time, and returns the duration
// since that time. If the key is not found, the value returned will be zero.
// This is helpful when inferring metrics related to context execution times.
func Since(ctx Context, key interface{}) time.Duration {
if startedAt, ok := ctx.Value(key).(time.Time); ok {
return time.Since(startedAt)
}
return 0
}
// GetStringValue returns a string value from the context. The empty string
// will be returned if not found.
func GetStringValue(ctx Context, key interface{}) (value string) {
if valuev, ok := ctx.Value(key).(string); ok {
value = valuev
}
return value
}
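// An illustrative helper built on Since, using the "http.request.startedat"
// key that WithRequest records:
//
//	func logElapsed(ctx Context) {
//		if d := Since(ctx, "http.request.startedat"); d > 0 {
//			GetLogger(ctx).Debugf("elapsed: %v", d)
//		}
//	}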

View file

@ -1,16 +0,0 @@
package context
// WithVersion stores the application version in the context. The new context
// gets a logger to ensure log messages are marked with the application
// version.
func WithVersion(ctx Context, version string) Context {
ctx = WithValue(ctx, "version", version)
// push a new logger onto the stack
return WithLogger(ctx, GetLogger(ctx, "version"))
}
// GetVersion returns the application version from the context. An empty
// string may be returned if the version was not set on the context.
func GetVersion(ctx Context) string {
return GetStringValue(ctx, "version")
}
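// A usage sketch (the version string is illustrative):
//
//	ctx := WithVersion(Background(), "2.5.0")
//	GetLogger(ctx).Infof("starting, version %s", GetVersion(ctx))
//	// every logger derived from ctx now carries the "version" field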

View file

@ -1,7 +0,0 @@
#!/usr/bin/env bash
# Given a subpackage and the containing package, figures out which packages
# need to be passed to `go test -coverpkg`: this includes all of the
# subpackage's dependencies within the containing package, as well as the
# subpackage itself.
DEPENDENCIES="$(go list -f $'{{range $f := .Deps}}{{$f}}\n{{end}}' ${1} | grep ${2} | grep -v github.com/docker/distribution/vendor)"
echo "${1} ${DEPENDENCIES}" | xargs echo -n | tr ' ' ','

View file

@ -61,6 +61,12 @@ type ManifestEnumerator interface {
Enumerate(ctx context.Context, ingester func(digest.Digest) error) error
}
// SignaturesGetter provides an interface for getting the signatures of a schema1 manifest. If the digest
// referred to is not a schema1 manifest, an error should be returned.
type SignaturesGetter interface {
GetSignatures(ctx context.Context, manifestDigest digest.Digest) ([]digest.Digest, error)
}
// Describable is an interface for descriptors
type Describable interface {
Descriptor() Descriptor

View file

@ -1,126 +0,0 @@
// Package uuid provides simple UUID generation. Only version 4 style UUIDs
// can be generated.
//
// Please see http://tools.ietf.org/html/rfc4122 for details on UUIDs.
package uuid
import (
"crypto/rand"
"fmt"
"io"
"os"
"syscall"
"time"
)
const (
// Bits is the number of bits in a UUID
Bits = 128
// Size is the number of bytes in a UUID
Size = Bits / 8
format = "%08x-%04x-%04x-%04x-%012x"
)
var (
// ErrUUIDInvalid indicates a parsed string is not a valid uuid.
ErrUUIDInvalid = fmt.Errorf("invalid uuid")
// Loggerf can be used to override the default logging destination. Such
// log messages in this library should be logged at warning or higher.
Loggerf = func(format string, args ...interface{}) {}
)
// UUID represents a UUID value. UUIDs can be compared and set to other values
// and accessed by byte.
type UUID [Size]byte
// Generate creates a new, version 4 uuid.
func Generate() (u UUID) {
const (
// ensures we back off for less than 450ms total. Use the following to
// select a new value, in units of 10ms:
// n*(n+1)/2 = d -> n^2 + n - 2d = 0 -> n = (sqrt(8d + 1) - 1)/2
maxretries = 9
backoff = time.Millisecond * 10
)
var (
totalBackoff time.Duration
count int
retries int
)
for {
// This should never block but the read may fail. Because of this,
// we just try to read the random number generator until we get
// something. This is a very rare condition but may happen.
b := time.Duration(retries) * backoff
time.Sleep(b)
totalBackoff += b
n, err := io.ReadFull(rand.Reader, u[count:])
if err != nil {
if retryOnError(err) && retries < maxretries {
count += n
retries++
Loggerf("error generating version 4 uuid, retrying: %v", err)
continue
}
// Any other errors represent a system problem. What did someone
// do to /dev/urandom?
panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err))
}
break
}
u[6] = (u[6] & 0x0f) | 0x40 // set version byte
u[8] = (u[8] & 0x3f) | 0x80 // set high order byte 0b10{8,9,a,b}
return u
}
// Parse attempts to extract a uuid from the string or returns an error.
func Parse(s string) (u UUID, err error) {
if len(s) != 36 {
return UUID{}, ErrUUIDInvalid
}
// create stack addresses for each section of the uuid.
p := make([][]byte, 5)
if _, err := fmt.Sscanf(s, format, &p[0], &p[1], &p[2], &p[3], &p[4]); err != nil {
return u, err
}
copy(u[0:4], p[0])
copy(u[4:6], p[1])
copy(u[6:8], p[2])
copy(u[8:10], p[3])
copy(u[10:16], p[4])
return
}
func (u UUID) String() string {
return fmt.Sprintf(format, u[:4], u[4:6], u[6:8], u[8:10], u[10:])
}
// retryOnError tries to detect whether or not retrying would be fruitful.
func retryOnError(err error) bool {
switch err := err.(type) {
case *os.PathError:
return retryOnError(err.Err) // unpack the target error
case syscall.Errno:
if err == syscall.EPERM {
// EPERM represents an entropy pool exhaustion, a condition under
// which we backoff and retry.
return true
}
}
return false
}
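// A round-trip sketch (values illustrative):
//
//	u := Generate()
//	s := u.String() // e.g. "4b8c9a6e-..." in 8-4-4-4-12 form
//	parsed, err := Parse(s)
//	// err == nil && parsed == u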

View file

@ -2,66 +2,31 @@ package graphdriver
import "sync"
type minfo struct {
check bool
count int
}
// RefCounter is a generic counter for use by graphdriver Get/Put calls
type RefCounter struct {
counts map[string]*minfo
mu sync.Mutex
checker Checker
counts map[string]int
mu sync.Mutex
}
// NewRefCounter returns a new RefCounter
func NewRefCounter(c Checker) *RefCounter {
return &RefCounter{
checker: c,
counts: make(map[string]*minfo),
}
func NewRefCounter() *RefCounter {
return &RefCounter{counts: make(map[string]int)}
}
// Increment increases the ref count for the given id and returns the current count
func (c *RefCounter) Increment(path string) int {
func (c *RefCounter) Increment(id string) int {
c.mu.Lock()
m := c.counts[path]
if m == nil {
m = &minfo{}
c.counts[path] = m
}
// if we are checking this path for the first time, check whether it is
// already mounted on the system; if so, bump the count so the ref count
// reflects the existing in-use mount.
if !m.check {
m.check = true
if c.checker.IsMounted(path) {
m.count++
}
}
m.count++
c.counts[id]++
count := c.counts[id]
c.mu.Unlock()
return m.count
return count
}
// Decrement decreases the ref count for the given id and returns the current count
func (c *RefCounter) Decrement(path string) int {
func (c *RefCounter) Decrement(id string) int {
c.mu.Lock()
m := c.counts[path]
if m == nil {
m = &minfo{}
c.counts[path] = m
}
// if we are checking this path for the first time, check whether it is
// already mounted on the system; if so, bump the count so the ref count
// reflects the existing in-use mount.
if !m.check {
m.check = true
if c.checker.IsMounted(path) {
m.count++
}
}
m.count--
c.counts[id]--
count := c.counts[id]
c.mu.Unlock()
return m.count
return count
}
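// A usage sketch of the simplified counter above (the id is illustrative):
//
//	c := NewRefCounter()
//	c.Increment("layer-1") // returns 1
//	c.Increment("layer-1") // returns 2
//	c.Decrement("layer-1") // returns 1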

View file

@ -46,12 +46,9 @@ type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDM
type ProtoDriver interface {
// String returns a string representation of this driver.
String() string
// CreateReadWrite creates a new, empty filesystem layer that is ready
// to be used as the storage for a container.
CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error
// Create creates a new, empty, filesystem layer with the
// specified id and parent and mountLabel. Parent and mountLabel may be "".
Create(id, parent, mountLabel string, storageOpt map[string]string) error
Create(id, parent, mountLabel string) error
// Remove attempts to remove the filesystem layer with this id.
Remove(id string) error
// Get returns the mountpoint for the layered filesystem referred
@ -113,17 +110,11 @@ type FileGetCloser interface {
Close() error
}
// Checker makes checks on specified filesystems.
type Checker interface {
// IsMounted returns true if the provided path is mounted for the specific checker
IsMounted(path string) bool
}
func init() {
drivers = make(map[string]InitFunc)
}
// Register registers an InitFunc for the driver.
// Register registers a InitFunc for the driver.
func Register(name string, initFunc InitFunc) error {
if _, exists := drivers[name]; exists {
return fmt.Errorf("Name already registered %s", name)

View file

@ -5,8 +5,6 @@ package graphdriver
import (
"path/filepath"
"syscall"
"github.com/docker/docker/pkg/mount"
)
const (
@ -16,8 +14,6 @@ const (
FsMagicBtrfs = FsMagic(0x9123683E)
// FsMagicCramfs filesystem id for Cramfs
FsMagicCramfs = FsMagic(0x28cd3d45)
// FsMagicEcryptfs filesystem id for eCryptfs
FsMagicEcryptfs = FsMagic(0xf15f)
// FsMagicExtfs filesystem id for Extfs
FsMagicExtfs = FsMagic(0x0000EF53)
// FsMagicF2fs filesystem id for F2fs
@ -93,36 +89,6 @@ func GetFSMagic(rootpath string) (FsMagic, error) {
return FsMagic(buf.Type), nil
}
// NewFsChecker returns a checker configured for the provided FsMagic
func NewFsChecker(t FsMagic) Checker {
return &fsChecker{
t: t,
}
}
type fsChecker struct {
t FsMagic
}
func (c *fsChecker) IsMounted(path string) bool {
m, _ := Mounted(c.t, path)
return m
}
// NewDefaultChecker returns a checker that parses /proc/mountinfo to check
// if the specified path is mounted.
func NewDefaultChecker() Checker {
return &defaultChecker{}
}
type defaultChecker struct {
}
func (c *defaultChecker) IsMounted(path string) bool {
m, _ := mount.Mounted(path)
return m
}
// Mounted checks if the given path is mounted as the fs type
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
var buf syscall.Statfs_t

View file

@ -1,65 +0,0 @@
// +build solaris,cgo
package graphdriver
/*
#include <sys/statvfs.h>
#include <stdlib.h>
static inline struct statvfs *getstatfs(char *s) {
struct statvfs *buf;
int err;
buf = (struct statvfs *)malloc(sizeof(struct statvfs));
err = statvfs(s, buf);
return buf;
}
*/
import "C"
import (
"path/filepath"
"unsafe"
log "github.com/Sirupsen/logrus"
)
const (
// FsMagicZfs filesystem id for Zfs
FsMagicZfs = FsMagic(0x2fc12fc1)
)
var (
// Slice of drivers that should be used in an order
priority = []string{
"zfs",
}
// FsNames maps filesystem id to name of the filesystem.
FsNames = map[FsMagic]string{
FsMagicZfs: "zfs",
}
)
// GetFSMagic returns the filesystem id given the path.
func GetFSMagic(rootpath string) (FsMagic, error) {
return 0, nil
}
// Mounted checks if the given path is mounted as the fs type
// Solaris supports only ZFS for now
func Mounted(fsType FsMagic, mountPath string) (bool, error) {
cs := C.CString(filepath.Dir(mountPath))
buf := C.getstatfs(cs)
// on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ]
if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) ||
(buf.f_basetype[3] != 0) {
log.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath)
C.free(unsafe.Pointer(buf))
return false, ErrPrerequisites
}
C.free(unsafe.Pointer(buf))
C.free(unsafe.Pointer(cs))
return true, nil
}

View file

@ -1,4 +1,4 @@
// +build !linux,!windows,!freebsd,!solaris
// +build !linux,!windows,!freebsd
package graphdriver

View file

@ -4,6 +4,8 @@ var (
// Slice of drivers that should be used in order
priority = []string{
"windowsfilter",
"windowsdiff",
"vfs",
}
)

View file

@ -132,7 +132,7 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff archive.Reader) (s
options := &archive.TarOptions{UIDMaps: gdw.uidMaps,
GIDMaps: gdw.gidMaps}
start := time.Now().UTC()
logrus.Debug("Start untar layer")
logrus.Debugf("Start untar layer")
if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil {
return
}

View file

@ -23,7 +23,7 @@ func lookupPlugin(name, home string, opts []string) (Driver, error) {
if err != nil {
return nil, fmt.Errorf("Error looking up graphdriver plugin %s: %v", name, err)
}
return newPluginDriver(name, home, opts, pl.Client())
return newPluginDriver(name, home, opts, pl.Client)
}
func newPluginDriver(name, home string, opts []string, c pluginClient) (Driver, error) {

View file

@ -54,23 +54,7 @@ func (d *graphDriverProxy) String() string {
return d.name
}
func (d *graphDriverProxy) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error {
args := &graphDriverRequest{
ID: id,
Parent: parent,
MountLabel: mountLabel,
}
var ret graphDriverResponse
if err := d.client.Call("GraphDriver.CreateReadWrite", args, &ret); err != nil {
return err
}
if ret.Err != "" {
return errors.New(ret.Err)
}
return nil
}
func (d *graphDriverProxy) Create(id, parent, mountLabel string, storageOpt map[string]string) error {
func (d *graphDriverProxy) Create(id, parent, mountLabel string) error {
args := &graphDriverRequest{
ID: id,
Parent: parent,

View file

@ -9,7 +9,6 @@ import (
"github.com/Sirupsen/logrus"
"github.com/docker/distribution/digest"
"github.com/docker/docker/pkg/ioutils"
)
// IDWalkFunc is function called by StoreBackend.Walk
@ -119,7 +118,12 @@ func (s *fs) Set(data []byte) (ID, error) {
}
id := ID(digest.FromBytes(data))
if err := ioutils.AtomicWriteFile(s.contentFile(id), data, 0600); err != nil {
filePath := s.contentFile(id)
tempFilePath := s.contentFile(id) + ".tmp"
if err := ioutil.WriteFile(tempFilePath, data, 0600); err != nil {
return "", err
}
if err := os.Rename(tempFilePath, filePath); err != nil {
return "", err
}
@ -152,7 +156,12 @@ func (s *fs) SetMetadata(id ID, key string, data []byte) error {
if err := os.MkdirAll(baseDir, 0700); err != nil {
return err
}
return ioutils.AtomicWriteFile(filepath.Join(s.metadataDir(id), key), data, 0600)
filePath := filepath.Join(s.metadataDir(id), key)
tempFilePath := filePath + ".tmp"
if err := ioutil.WriteFile(tempFilePath, data, 0600); err != nil {
return err
}
return os.Rename(tempFilePath, filePath)
}
// GetMetadata returns metadata for a given ID.
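// The write-temp-then-rename pattern used above, in isolation (a sketch;
// the helper name is hypothetical):
//
//	func atomicWrite(path string, data []byte) error {
//		tmp := path + ".tmp"
//		if err := ioutil.WriteFile(tmp, data, 0600); err != nil {
//			return err
//		}
//		// rename is atomic on POSIX filesystems, so readers never see
//		// a partially written file at path
//		return os.Rename(tmp, path)
//	}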

View file

@ -48,11 +48,9 @@ type V1Image struct {
// Image stores the image configuration
type Image struct {
V1Image
Parent ID `json:"parent,omitempty"`
RootFS *RootFS `json:"rootfs,omitempty"`
History []History `json:"history,omitempty"`
OSVersion string `json:"os.version,omitempty"`
OSFeatures []string `json:"os.features,omitempty"`
Parent ID `json:"parent,omitempty"`
RootFS *RootFS `json:"rootfs,omitempty"`
History []History `json:"history,omitempty"`
// rawJSON caches the immutable JSON associated with this image.
rawJSON []byte

View file

@ -2,14 +2,6 @@ package image
import "github.com/docker/docker/layer"
// TypeLayers is used for RootFS.Type for filesystems organized into layers.
const TypeLayers = "layers"
// NewRootFS returns empty RootFS struct
func NewRootFS() *RootFS {
return &RootFS{Type: TypeLayers}
}
// Append appends a new diffID to rootfs
func (r *RootFS) Append(id layer.DiffID) {
r.DiffIDs = append(r.DiffIDs, id)

View file

@ -16,3 +16,8 @@ type RootFS struct {
func (r *RootFS) ChainID() layer.ChainID {
return layer.CreateChainID(r.DiffIDs)
}
// NewRootFS returns empty RootFS struct
func NewRootFS() *RootFS {
return &RootFS{Type: "layers"}
}

View file

@ -10,9 +10,6 @@ import (
"github.com/docker/docker/layer"
)
// TypeLayersWithBase is used for RootFS.Type for Windows filesystems that have layers and a centrally-stored base layer.
const TypeLayersWithBase = "layers+base"
// RootFS describes images root filesystem
// This is currently a placeholder that only supports layers. In the future
// this can be made into an interface that supports different implementations.
@ -24,25 +21,17 @@ type RootFS struct {
// BaseLayerID returns the 64 byte hex ID for the baselayer name.
func (r *RootFS) BaseLayerID() string {
if r.Type != TypeLayersWithBase {
panic("tried to get base layer ID without a base layer")
}
baseID := sha512.Sum384([]byte(r.BaseLayer))
return fmt.Sprintf("%x", baseID[:32])
}
// ChainID returns the ChainID for the top layer in RootFS.
func (r *RootFS) ChainID() layer.ChainID {
ids := r.DiffIDs
if r.Type == TypeLayersWithBase {
// Add an extra ID for the base.
baseDiffID := layer.DiffID(digest.FromBytes([]byte(r.BaseLayerID())))
ids = append([]layer.DiffID{baseDiffID}, ids...)
}
return layer.CreateChainID(ids)
baseDiffID := digest.FromBytes([]byte(r.BaseLayerID()))
return layer.CreateChainID(append([]layer.DiffID{layer.DiffID(baseDiffID)}, r.DiffIDs...))
}
// NewRootFSWithBaseLayer returns a RootFS struct with a base layer
func NewRootFSWithBaseLayer(baseLayer string) *RootFS {
return &RootFS{Type: TypeLayersWithBase, BaseLayer: baseLayer}
// NewRootFS returns empty RootFS struct
func NewRootFS() *RootFS {
return &RootFS{Type: "layers+base"}
}

View file

@ -3,7 +3,6 @@ package v1
import (
"encoding/json"
"fmt"
"reflect"
"regexp"
"strings"
@ -11,7 +10,7 @@ import (
"github.com/docker/distribution/digest"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
"github.com/docker/engine-api/types/versions"
"github.com/docker/docker/pkg/version"
)
var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
@ -19,7 +18,7 @@ var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
// noFallbackMinVersion is the minimum version for which v1compatibility
// information will not be marshaled through the Image struct to remove
// blank fields.
var noFallbackMinVersion = "1.8.3"
var noFallbackMinVersion = version.Version("1.8.3")
// HistoryFromConfig creates a History struct from v1 configuration JSON
func HistoryFromConfig(imageJSON []byte, emptyLayer bool) (image.History, error) {
@ -77,7 +76,7 @@ func MakeConfigFromV1Config(imageJSON []byte, rootfs *image.RootFS, history []im
return nil, err
}
useFallback := versions.LessThan(dver.DockerVersion, noFallbackMinVersion)
useFallback := version.Version(dver.DockerVersion).LessThan(noFallbackMinVersion)
if useFallback {
var v1Image image.V1Image
@ -119,15 +118,8 @@ func MakeV1ConfigFromConfig(img *image.Image, v1ID, parentV1ID string, throwaway
}
// Delete fields that didn't exist in old manifest
imageType := reflect.TypeOf(img).Elem()
for i := 0; i < imageType.NumField(); i++ {
f := imageType.Field(i)
jsonName := strings.Split(f.Tag.Get("json"), ",")[0]
// Parent is handled specially below.
if jsonName != "" && jsonName != "parent" {
delete(configAsMap, jsonName)
}
}
delete(configAsMap, "rootfs")
delete(configAsMap, "history")
configAsMap["id"] = rawJSON(v1ID)
if parentV1ID != "" {
configAsMap["parent"] = rawJSON(parentV1ID)

View file

@ -2,7 +2,6 @@ package layer
import (
"compress/gzip"
"encoding/json"
"errors"
"fmt"
"io"
@ -14,7 +13,6 @@ import (
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution"
"github.com/docker/distribution/digest"
"github.com/docker/docker/pkg/ioutils"
)
@ -100,14 +98,6 @@ func (fm *fileMetadataTransaction) SetCacheID(cacheID string) error {
return ioutil.WriteFile(filepath.Join(fm.root, "cache-id"), []byte(cacheID), 0644)
}
func (fm *fileMetadataTransaction) SetDescriptor(ref distribution.Descriptor) error {
jsonRef, err := json.Marshal(ref)
if err != nil {
return err
}
return ioutil.WriteFile(filepath.Join(fm.root, "descriptor.json"), jsonRef, 0644)
}
func (fm *fileMetadataTransaction) TarSplitWriter(compressInput bool) (io.WriteCloser, error) {
f, err := os.OpenFile(filepath.Join(fm.root, "tar-split.json.gz"), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
@ -201,24 +191,6 @@ func (fms *fileMetadataStore) GetCacheID(layer ChainID) (string, error) {
return content, nil
}
func (fms *fileMetadataStore) GetDescriptor(layer ChainID) (distribution.Descriptor, error) {
content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "descriptor.json"))
if err != nil {
if os.IsNotExist(err) {
// only return empty descriptor to represent what is stored
return distribution.Descriptor{}, nil
}
return distribution.Descriptor{}, err
}
var ref distribution.Descriptor
err = json.Unmarshal(content, &ref)
if err != nil {
return distribution.Descriptor{}, err
}
return ref, err
}
func (fms *fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, error) {
fz, err := os.Open(fms.getLayerFilename(layer, "tar-split.json.gz"))
if err != nil {

View file

@ -14,7 +14,6 @@ import (
"io"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution"
"github.com/docker/distribution/digest"
"github.com/docker/docker/pkg/archive"
)
@ -51,7 +50,7 @@ var (
// greater than the 125 max.
ErrMaxDepthExceeded = errors.New("max depth exceeded")
// ErrNotSupported is used when the action is not supported
// ErrNotSupported is used when the action is not supppoted
// on the current platform
ErrNotSupported = errors.New("not support on this platform")
)
@ -172,9 +171,10 @@ type Store interface {
Get(ChainID) (Layer, error)
Release(Layer) ([]Metadata, error)
CreateRWLayer(id string, parent ChainID, mountLabel string, initFunc MountInit, storageOpt map[string]string) (RWLayer, error)
CreateRWLayer(id string, parent ChainID, mountLabel string, initFunc MountInit) (RWLayer, error)
GetRWLayer(id string) (RWLayer, error)
GetMountID(id string) (string, error)
ReinitRWLayer(l RWLayer) error
ReleaseRWLayer(RWLayer) ([]Metadata, error)
Cleanup() error
@ -182,12 +182,6 @@ type Store interface {
DriverName() string
}
// DescribableStore represents a layer store capable of storing
// descriptors for layers.
type DescribableStore interface {
RegisterWithDescriptor(io.Reader, ChainID, distribution.Descriptor) (Layer, error)
}
// MetadataTransaction represents functions for setting layer metadata
// with a single transaction.
type MetadataTransaction interface {
@ -195,7 +189,6 @@ type MetadataTransaction interface {
SetParent(parent ChainID) error
SetDiffID(DiffID) error
SetCacheID(string) error
SetDescriptor(distribution.Descriptor) error
TarSplitWriter(compressInput bool) (io.WriteCloser, error)
Commit(ChainID) error
@ -215,7 +208,6 @@ type MetadataStore interface {
GetParent(ChainID) (ChainID, error)
GetDiffID(ChainID) (DiffID, error)
GetCacheID(ChainID) (string, error)
GetDescriptor(ChainID) (distribution.Descriptor, error)
TarSplitReader(ChainID) (io.ReadCloser, error)
SetMountID(string, string) error

View file

@ -8,7 +8,6 @@ import (
"sync"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution"
"github.com/docker/distribution/digest"
"github.com/docker/docker/daemon/graphdriver"
"github.com/docker/docker/pkg/archive"
@ -129,11 +128,6 @@ func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) {
return nil, fmt.Errorf("failed to get parent for %s: %s", layer, err)
}
descriptor, err := ls.store.GetDescriptor(layer)
if err != nil {
return nil, fmt.Errorf("failed to get descriptor for %s: %s", layer, err)
}
cl = &roLayer{
chainID: layer,
diffID: diff,
@ -141,7 +135,6 @@ func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) {
cacheID: cacheID,
layerStore: ls,
references: map[Layer]struct{}{},
descriptor: descriptor,
}
if parent != "" {
@ -235,10 +228,6 @@ func (ls *layerStore) applyTar(tx MetadataTransaction, ts io.Reader, parent stri
}
func (ls *layerStore) Register(ts io.Reader, parent ChainID) (Layer, error) {
return ls.registerWithDescriptor(ts, parent, distribution.Descriptor{})
}
func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, descriptor distribution.Descriptor) (Layer, error) {
// err is used to hold the error which will always trigger
// cleanup of created sources but may not be an error returned
// to the caller (already exists).
@ -272,10 +261,9 @@ func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, descr
referenceCount: 1,
layerStore: ls,
references: map[Layer]struct{}{},
descriptor: descriptor,
}
if err = ls.driver.Create(layer.cacheID, pid, "", nil); err != nil {
if err = ls.driver.Create(layer.cacheID, pid, ""); err != nil {
return nil, err
}
@ -429,7 +417,7 @@ func (ls *layerStore) Release(l Layer) ([]Metadata, error) {
return ls.releaseLayer(layer)
}
func (ls *layerStore) CreateRWLayer(name string, parent ChainID, mountLabel string, initFunc MountInit, storageOpt map[string]string) (RWLayer, error) {
func (ls *layerStore) CreateRWLayer(name string, parent ChainID, mountLabel string, initFunc MountInit) (RWLayer, error) {
ls.mountL.Lock()
defer ls.mountL.Unlock()
m, ok := ls.mounts[name]
@ -466,14 +454,14 @@ func (ls *layerStore) CreateRWLayer(name string, parent ChainID, mountLabel stri
}
if initFunc != nil {
pid, err = ls.initMount(m.mountID, pid, mountLabel, initFunc, storageOpt)
pid, err = ls.initMount(m.mountID, pid, mountLabel, initFunc)
if err != nil {
return nil, err
}
m.initID = pid
}
if err = ls.driver.CreateReadWrite(m.mountID, pid, "", storageOpt); err != nil {
if err = ls.driver.Create(m.mountID, pid, ""); err != nil {
return nil, err
}
@ -507,6 +495,25 @@ func (ls *layerStore) GetMountID(id string) (string, error) {
return mount.mountID, nil
}
// ReinitRWLayer reinitializes a given mount to the layerstore, specifically
// initializing the usage count. It should strictly only be used in the
// daemon's restore path to restore state of live containers.
func (ls *layerStore) ReinitRWLayer(l RWLayer) error {
ls.mountL.Lock()
defer ls.mountL.Unlock()
m, ok := ls.mounts[l.Name()]
if !ok {
return ErrMountDoesNotExist
}
if err := m.incActivityCount(l); err != nil {
return err
}
return nil
}
func (ls *layerStore) ReleaseRWLayer(l RWLayer) ([]Metadata, error) {
ls.mountL.Lock()
defer ls.mountL.Unlock()
@ -576,14 +583,14 @@ func (ls *layerStore) saveMount(mount *mountedLayer) error {
return nil
}
func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit, storageOpt map[string]string) (string, error) {
func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit) (string, error) {
// Use "<graph-id>-init" to maintain compatibility with graph drivers
// which are expecting this layer with this special name. If all
// graph drivers can be updated to not rely on knowing about this layer
// then the initID should be randomly generated.
initID := fmt.Sprintf("%s-init", graphID)
if err := ls.driver.Create(initID, parent, mountLabel, storageOpt); err != nil {
if err := ls.driver.Create(initID, parent, mountLabel); err != nil {
return "", err
}
p, err := ls.driver.Get(initID, "")

View file

@ -1,11 +0,0 @@
package layer
import (
"io"
"github.com/docker/distribution"
)
func (ls *layerStore) RegisterWithDescriptor(ts io.Reader, parent ChainID, descriptor distribution.Descriptor) (Layer, error) {
return ls.registerWithDescriptor(ts, parent, descriptor)
}

View file

@ -1,4 +1,4 @@
// +build linux freebsd darwin openbsd solaris
// +build linux freebsd darwin openbsd
package layer

View file

@ -2,6 +2,7 @@ package layer
import (
"io"
"sync"
"github.com/docker/docker/pkg/archive"
)
@ -49,6 +50,14 @@ func (ml *mountedLayer) Parent() Layer {
return nil
}
func (ml *mountedLayer) Mount(mountLabel string) (string, error) {
return ml.layerStore.driver.Get(ml.mountID, mountLabel)
}
func (ml *mountedLayer) Unmount() error {
return ml.layerStore.driver.Put(ml.mountID)
}
func (ml *mountedLayer) Size() (int64, error) {
return ml.layerStore.driver.DiffSize(ml.mountID, ml.cacheParent())
}
@ -74,30 +83,106 @@ func (ml *mountedLayer) hasReferences() bool {
return len(ml.references) > 0
}
func (ml *mountedLayer) deleteReference(ref RWLayer) error {
if _, ok := ml.references[ref]; !ok {
func (ml *mountedLayer) incActivityCount(ref RWLayer) error {
rl, ok := ml.references[ref]
if !ok {
return ErrLayerNotRetained
}
if err := rl.acquire(); err != nil {
return err
}
return nil
}
func (ml *mountedLayer) deleteReference(ref RWLayer) error {
rl, ok := ml.references[ref]
if !ok {
return ErrLayerNotRetained
}
if err := rl.release(); err != nil {
return err
}
delete(ml.references, ref)
return nil
}
func (ml *mountedLayer) retakeReference(r RWLayer) {
if ref, ok := r.(*referencedRWLayer); ok {
ref.activityCount = 0
ml.references[ref] = ref
}
}
type referencedRWLayer struct {
*mountedLayer
activityL sync.Mutex
activityCount int
}
func (rl *referencedRWLayer) acquire() error {
rl.activityL.Lock()
defer rl.activityL.Unlock()
rl.activityCount++
return nil
}
func (rl *referencedRWLayer) release() error {
rl.activityL.Lock()
defer rl.activityL.Unlock()
if rl.activityCount > 0 {
return ErrActiveMount
}
rl.activityCount = -1
return nil
}
func (rl *referencedRWLayer) Mount(mountLabel string) (string, error) {
return rl.layerStore.driver.Get(rl.mountedLayer.mountID, mountLabel)
rl.activityL.Lock()
defer rl.activityL.Unlock()
if rl.activityCount == -1 {
return "", ErrLayerNotRetained
}
if rl.activityCount > 0 {
rl.activityCount++
return rl.path, nil
}
m, err := rl.mountedLayer.Mount(mountLabel)
if err == nil {
rl.activityCount++
rl.path = m
}
return m, err
}
// Unmount decrements the activity count and unmounts the underlying layer
// Callers should only call `Unmount` once per call to `Mount`, even on error.
func (rl *referencedRWLayer) Unmount() error {
return rl.layerStore.driver.Put(rl.mountedLayer.mountID)
rl.activityL.Lock()
defer rl.activityL.Unlock()
if rl.activityCount == 0 {
return ErrNotMounted
}
if rl.activityCount == -1 {
return ErrLayerNotRetained
}
rl.activityCount--
if rl.activityCount > 0 {
return nil
}
return rl.mountedLayer.Unmount()
}
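// A pairing sketch for the contract above (the helper is illustrative):
// each successful Mount is matched by exactly one Unmount.
//
//	func withMount(rw RWLayer, f func(path string) error) error {
//		path, err := rw.Mount("")
//		if err != nil {
//			return err
//		}
//		defer rw.Unmount()
//		return f(path)
//	}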

View file

@ -4,7 +4,6 @@ import (
"fmt"
"io"
"github.com/docker/distribution"
"github.com/docker/distribution/digest"
)
@ -15,7 +14,6 @@ type roLayer struct {
cacheID string
size int64
layerStore *layerStore
descriptor distribution.Descriptor
referenceCount int
references map[Layer]struct{}
@ -120,12 +118,6 @@ func storeLayer(tx MetadataTransaction, layer *roLayer) error {
if err := tx.SetCacheID(layer.cacheID); err != nil {
return err
}
// Do not store empty descriptors
if layer.descriptor.Digest != "" {
if err := tx.SetDescriptor(layer.descriptor); err != nil {
return err
}
}
if layer.parent != nil {
if err := tx.SetParent(layer.parent.chainID); err != nil {
return err

View file

@ -1,9 +0,0 @@
package layer
import "github.com/docker/distribution"
var _ distribution.Describable = &roLayer{}
func (rl *roLayer) Descriptor() distribution.Descriptor {
return rl.descriptor
}

View file

@ -1 +0,0 @@
This code provides helper functions for dealing with archive files.

View file

@ -33,8 +33,6 @@ type (
Reader io.Reader
// Compression is the state represents if compressed or not.
Compression int
// WhiteoutFormat is the format of whiteouts unpacked
WhiteoutFormat int
// TarChownOptions wraps the chown options UID and GID.
TarChownOptions struct {
UID, GID int
@ -49,10 +47,6 @@ type (
GIDMaps []idtools.IDMap
ChownOpts *TarChownOptions
IncludeSourceDir bool
// WhiteoutFormat is the expected on disk format for whiteout files.
// This format will be converted to the standard format on pack
// and from the standard format on unpack.
WhiteoutFormat WhiteoutFormat
// When unpacking, specifies whether overwriting a directory with a
// non-directory is allowed and vice versa.
NoOverwriteDirNonDir bool
@ -99,14 +93,6 @@ const (
Xz
)
const (
// AUFSWhiteoutFormat is the default format for whiteouts
AUFSWhiteoutFormat WhiteoutFormat = iota
// OverlayWhiteoutFormat formats whiteout according to the overlay
// standard.
OverlayWhiteoutFormat
)
// IsArchive checks for the magic bytes of a tar or any supported compression
// algorithm.
func IsArchive(header []byte) bool {
@ -144,7 +130,7 @@ func DetectCompression(source []byte) Compression {
Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
} {
if len(source) < len(m) {
logrus.Debug("Len too short")
logrus.Debugf("Len too short")
continue
}
if bytes.Compare(m, source[:len(m)]) == 0 {
@ -160,7 +146,7 @@ func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) {
return cmdStream(exec.Command(args[0], args[1:]...), archive)
}
// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive.
// DecompressStream decompress the archive and returns a ReaderCloser with the decompressed archive.
func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
p := pools.BufioReader32KPool
buf := p.Get(archive)
@ -206,8 +192,8 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
}
}
// CompressStream compresseses the dest with specified compression algorithm.
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
// CompressStream compresses the dest with specified compression algorithm.
func CompressStream(dest io.WriteCloser, compression Compression) (io.WriteCloser, error) {
p := pools.BufioWriter32KPool
buf := p.Get(dest)
switch compression {
@ -242,11 +228,6 @@ func (compression *Compression) Extension() string {
return ""
}
type tarWhiteoutConverter interface {
ConvertWrite(*tar.Header, string, os.FileInfo) error
ConvertRead(*tar.Header, string) (bool, error)
}
type tarAppender struct {
TarWriter *tar.Writer
Buffer *bufio.Writer
@ -255,12 +236,6 @@ type tarAppender struct {
SeenFiles map[uint64]string
UIDMaps []idtools.IDMap
GIDMaps []idtools.IDMap
// For packing and unpacking whiteout files in the
// non standard format. The whiteout files defined
// by the AUFS standard are used as the tar whiteout
// standard.
WhiteoutConverter tarWhiteoutConverter
}
// canonicalTarName provides a platform-independent and consistent posix-style
@ -278,7 +253,6 @@ func canonicalTarName(name string, isDir bool) (string, error) {
return name, nil
}
// addTarFile adds to the tar archive a file from `path` as `name`
func (ta *tarAppender) addTarFile(path, name string) error {
fi, err := os.Lstat(path)
if err != nil {
@ -349,17 +323,11 @@ func (ta *tarAppender) addTarFile(path, name string) error {
hdr.Gid = xGID
}
if ta.WhiteoutConverter != nil {
if err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi); err != nil {
return err
}
}
if err := ta.TarWriter.WriteHeader(hdr); err != nil {
return err
}
if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
if hdr.Typeflag == tar.TypeReg {
file, err := os.Open(path)
if err != nil {
return err
@ -440,7 +408,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
}
case tar.TypeXGlobalHeader:
logrus.Debug("PAX Global Extended Headers found and ignored")
logrus.Debugf("PAX Global Extended Headers found and ignored")
return nil
default:
@ -457,26 +425,10 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
}
}
var errors []string
for key, value := range hdr.Xattrs {
if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil {
if err == syscall.ENOTSUP {
// We ignore errors here because not all graphdrivers support
// xattrs *cough* old versions of AUFS *cough*. However only
// ENOTSUP should be emitted in that case, otherwise we still
// bail.
errors = append(errors, err.Error())
continue
}
return err
}
}
if len(errors) > 0 {
logrus.WithFields(logrus.Fields{
"errors": errors,
}).Warn("ignored xattrs in archive: underlying filesystem doesn't support them")
}
// There is no LChmod, so ignore mode for symlink. Also, this
@ -540,12 +492,11 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
go func() {
ta := &tarAppender{
TarWriter: tar.NewWriter(compressWriter),
Buffer: pools.BufioWriter32KPool.Get(nil),
SeenFiles: make(map[uint64]string),
UIDMaps: options.UIDMaps,
GIDMaps: options.GIDMaps,
WhiteoutConverter: getWhiteoutConverter(options.WhiteoutFormat),
TarWriter: tar.NewWriter(compressWriter),
Buffer: pools.BufioWriter32KPool.Get(nil),
SeenFiles: make(map[uint64]string),
UIDMaps: options.UIDMaps,
GIDMaps: options.GIDMaps,
}
defer func() {
@ -683,7 +634,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
if err := ta.addTarFile(filePath, relFilePath); err != nil {
logrus.Errorf("Can't add file %s to tar: %s", filePath, err)
// if pipe is broken, stop writing tar stream to it
// if pipe is broken, stop writting tar stream to it
if err == io.ErrClosedPipe {
return err
}
@ -707,7 +658,6 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err
if err != nil {
return err
}
whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat)
// Iterate through the files in the archive.
loop:
@ -807,16 +757,6 @@ loop:
hdr.Gid = xGID
}
if whiteoutConverter != nil {
writeFile, err := whiteoutConverter.ConvertRead(hdr, path)
if err != nil {
return err
}
if !writeFile {
continue
}
}
if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil {
return err
}

View file

@ -1,90 +0,0 @@
package archive
import (
"archive/tar"
"os"
"path/filepath"
"strings"
"syscall"
"github.com/docker/docker/pkg/system"
)
func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
if format == OverlayWhiteoutFormat {
return overlayWhiteoutConverter{}
}
return nil
}
type overlayWhiteoutConverter struct{}
func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) error {
// convert whiteouts to AUFS format
if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
// we just rename the file and make it normal
hdr.Name = WhiteoutPrefix + hdr.Name
hdr.Mode = 0600
hdr.Typeflag = tar.TypeReg
hdr.Size = 0
}
if fi.Mode()&os.ModeDir != 0 {
// convert opaque dirs to AUFS format by writing an empty file with the prefix
opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque")
if err != nil {
return err
}
if opaque != nil && len(opaque) == 1 && opaque[0] == 'y' {
// create a header for the whiteout file
// it should inherit some properties from the parent, but be a regular file
*hdr = tar.Header{
Typeflag: tar.TypeReg,
Mode: hdr.Mode & int64(os.ModePerm),
Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir),
Size: 0,
Uid: hdr.Uid,
Uname: hdr.Uname,
Gid: hdr.Gid,
Gname: hdr.Gname,
AccessTime: hdr.AccessTime,
ChangeTime: hdr.ChangeTime,
}
}
}
return nil
}
func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
base := filepath.Base(path)
dir := filepath.Dir(path)
// if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
if base == WhiteoutOpaqueDir {
if err := syscall.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0); err != nil {
return false, err
}
// don't write the file itself
return false, nil
}
// if a file was deleted and we are using overlay, we need to create a character device
if strings.HasPrefix(base, WhiteoutPrefix) {
originalBase := base[len(WhiteoutPrefix):]
originalPath := filepath.Join(dir, originalBase)
if err := syscall.Mknod(originalPath, syscall.S_IFCHR, 0); err != nil {
return false, err
}
if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
return false, err
}
// don't write the file itself
return false, nil
}
return true, nil
}
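// Concretely (per Docker's whiteout constants, where WhiteoutPrefix is
// ".wh." and WhiteoutOpaqueDir is ".wh..wh..opq"): deleting dir/file in a
// layer appears in the tar as dir/.wh.file, which ConvertRead turns back
// into a 0:0 character device at dir/file; the dir/.wh..wh..opq marker
// becomes the trusted.overlay.opaque xattr on dir.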

View file

@ -1,7 +0,0 @@
// +build !linux
package archive
func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
return nil
}

View file

@ -81,33 +81,6 @@ func sameFsTimeSpec(a, b syscall.Timespec) bool {
// Changes walks the path rw and determines changes for the files in the path,
// with respect to the parent layers
func Changes(layers []string, rw string) ([]Change, error) {
return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip)
}
func aufsMetadataSkip(path string) (skip bool, err error) {
skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path)
if err != nil {
skip = true
}
return
}
func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
f := filepath.Base(path)
// If there is a whiteout, then the file was removed
if strings.HasPrefix(f, WhiteoutPrefix) {
originalFile := f[len(WhiteoutPrefix):]
return filepath.Join(filepath.Dir(path), originalFile), nil
}
return "", nil
}
type skipChange func(string) (bool, error)
type deleteChange func(string, string, os.FileInfo) (string, error)
func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
var (
changes []Change
changedDirs = make(map[string]struct{})
@ -132,24 +105,21 @@ func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Chan
return nil
}
if sc != nil {
if skip, err := sc(path); skip {
return err
}
// Skip AUFS metadata
if matched, err := filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path); err != nil || matched {
return err
}
change := Change{
Path: path,
}
deletedFile, err := dc(rw, path, f)
if err != nil {
return err
}
// Find out what kind of modification happened
if deletedFile != "" {
change.Path = deletedFile
file := filepath.Base(path)
// If there is a whiteout, then the file was removed
if strings.HasPrefix(file, WhiteoutPrefix) {
originalFile := file[len(WhiteoutPrefix):]
change.Path = filepath.Join(filepath.Dir(path), originalFile)
change.Kind = ChangeDelete
} else {
// Otherwise, the file was added

View file

@ -283,30 +283,3 @@ func clen(n []byte) int {
}
return len(n)
}
// OverlayChanges walks the path rw and determines changes for the files in the path,
// with respect to the parent layers
func OverlayChanges(layers []string, rw string) ([]Change, error) {
return changes(layers, rw, overlayDeletedFile, nil)
}
func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) {
if fi.Mode()&os.ModeCharDevice != 0 {
s := fi.Sys().(*syscall.Stat_t)
if major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 {
return path, nil
}
}
if fi.Mode()&os.ModeDir != 0 {
opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque")
if err != nil {
return "", err
}
if opaque != nil && len(opaque) == 1 && opaque[0] == 'y' {
return path, nil
}
}
return "", nil
}

View file

@ -11,11 +11,19 @@ import (
"io/ioutil"
"os"
"runtime"
"syscall"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/reexec"
)
func chroot(path string) error {
if err := syscall.Chroot(path); err != nil {
return err
}
return syscall.Chdir("/")
}
// untar is the entry-point for docker-untar on re-exec. This is not used on
// Windows as it does not support chroot, hence no point sandboxing through
// chroot and re-exec.
@ -80,7 +88,7 @@ func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.T
// pending on write pipe forever
io.Copy(ioutil.Discard, decompressedArchive)
return fmt.Errorf("Error processing tar file(%v): %s", err, output)
return fmt.Errorf("Untar re-exec error: %v: output: %s", err, output)
}
return nil
}

View file

@ -1,103 +0,0 @@
package chrootarchive
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"syscall"
"github.com/docker/docker/pkg/mount"
)
// chroot on linux uses pivot_root instead of chroot
// pivot_root takes a new root and an old root.
// Old root must be a sub-dir of new root; it is where the current rootfs will reside after the call to pivot_root.
// New root is where the new rootfs is set to.
// Old root is removed after the call to pivot_root so it is no longer available under the new root.
// This is similar to how libcontainer sets up a container's rootfs
func chroot(path string) (err error) {
if err := syscall.Unshare(syscall.CLONE_NEWNS); err != nil {
return fmt.Errorf("Error creating mount namespace before pivot: %v", err)
}
if err := mount.MakeRPrivate(path); err != nil {
return err
}
// setup oldRoot for pivot_root
pivotDir, err := ioutil.TempDir(path, ".pivot_root")
if err != nil {
return fmt.Errorf("Error setting up pivot dir: %v", err)
}
var mounted bool
defer func() {
if mounted {
// make sure pivotDir is not mounted before we try to remove it
if errCleanup := syscall.Unmount(pivotDir, syscall.MNT_DETACH); errCleanup != nil {
if err == nil {
err = errCleanup
}
return
}
}
errCleanup := os.Remove(pivotDir)
// pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful
// because we already cleaned it up on failed pivot_root
if errCleanup != nil && !os.IsNotExist(errCleanup) {
errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup)
if err == nil {
err = errCleanup
}
}
if errCleanup := syscall.Unmount("/", syscall.MNT_DETACH); errCleanup != nil {
if err == nil {
err = fmt.Errorf("error unmounting root: %v", errCleanup)
}
return
}
}()
if err := syscall.PivotRoot(path, pivotDir); err != nil {
// If pivot fails, fall back to the normal chroot after cleaning up temp dir
if err := os.Remove(pivotDir); err != nil {
return fmt.Errorf("Error cleaning up after failed pivot: %v", err)
}
return realChroot(path)
}
mounted = true
// This is the new path for where the old root (prior to the pivot) has been moved to
// This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction
pivotDir = filepath.Join("/", filepath.Base(pivotDir))
if err := syscall.Chdir("/"); err != nil {
return fmt.Errorf("Error changing to new root: %v", err)
}
// Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host
if err := syscall.Mount("", pivotDir, "", syscall.MS_PRIVATE|syscall.MS_REC, ""); err != nil {
return fmt.Errorf("Error making old root private after pivot: %v", err)
}
// Now unmount the old root so it's no longer visible from the new root
if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil {
return fmt.Errorf("Error while unmounting old root after pivot: %v", err)
}
mounted = false
return nil
}
func realChroot(path string) error {
if err := syscall.Chroot(path); err != nil {
return fmt.Errorf("Error after fallback to chroot: %v", err)
}
if err := syscall.Chdir("/"); err != nil {
return fmt.Errorf("Error changing to new root after chroot: %v", err)
}
return nil
}
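A minimal, hypothetical sketch of the plain-chroot fallback that realChroot implements above, as a re-exec'd helper might use it before touching untrusted content; the program name and argument handling are illustrative, not the actual docker-untar wiring:

package main

import (
	"fmt"
	"os"
	"syscall"
)

func main() {
	if len(os.Args) < 2 {
		fmt.Fprintln(os.Stderr, "usage: sandbox <root>")
		os.Exit(1)
	}
	// Confine the process to the target directory, as realChroot does.
	if err := syscall.Chroot(os.Args[1]); err != nil {
		fmt.Fprintf(os.Stderr, "chroot: %v\n", err)
		os.Exit(1)
	}
	if err := syscall.Chdir("/"); err != nil {
		fmt.Fprintf(os.Stderr, "chdir: %v\n", err)
		os.Exit(1)
	}
	// From here on, path traversal in an archive cannot escape the new root.
}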


@ -1,12 +0,0 @@
// +build !windows,!linux
package chrootarchive
import "syscall"
func chroot(path string) error {
if err := syscall.Chroot(path); err != nil {
return err
}
return syscall.Chdir("/")
}


@ -13,12 +13,12 @@ import (
"github.com/Sirupsen/logrus"
)
// exclusion returns true if the specified pattern is an exclusion
// exclusion returns true if the specified pattern is an exclusion
func exclusion(pattern string) bool {
return pattern[0] == '!'
}
// empty returns true if the specified pattern is empty
// empty returns true if the specified pattern is empty
func empty(pattern string) bool {
return pattern == ""
}
@ -217,7 +217,7 @@ func regexpMatch(pattern, path string) (bool, error) {
}
// CopyFile copies from src to dst until either EOF is reached
// on src or an error occurs. It verifies src exists and removes
// on src or an error occurs. It verifies src exists and removes
// the dst if it exists.
func CopyFile(src, dst string) (int64, error) {
cleanSrc := filepath.Clean(src)


@ -1,27 +0,0 @@
package fileutils
import (
"os"
"os/exec"
"strconv"
"strings"
)
// GetTotalUsedFds returns the number of used File Descriptors by
// executing `lsof -p PID`
func GetTotalUsedFds() int {
pid := os.Getpid()
cmd := exec.Command("lsof", "-p", strconv.Itoa(pid))
output, err := cmd.CombinedOutput()
if err != nil {
return -1
}
outputStr := strings.TrimSpace(string(output))
fds := strings.Split(outputStr, "\n")
return len(fds) - 1
}
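For comparison, a hedged sketch of a Linux-only variant that avoids shelling out to lsof by counting entries in /proc/self/fd. This is not part of the package above; countFdsLinux is a hypothetical name, written as if inside package fileutils:

package fileutils

import "io/ioutil"

// countFdsLinux counts this process's open file descriptors by listing
// /proc/self/fd. The listing itself opens one descriptor, so subtract it.
func countFdsLinux() int {
	entries, err := ioutil.ReadDir("/proc/self/fd")
	if err != nil {
		return -1
	}
	return len(entries) - 1
}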


@ -1,7 +0,0 @@
package fileutils
// GetTotalUsedFds returns the number of used File Descriptors.
// On Solaris these limits are per process and not system-wide
func GetTotalUsedFds() int {
return -1
}


@ -8,7 +8,6 @@ import (
"sort"
"strconv"
"strings"
"sync"
)
// add a user and/or group to Linux /etc/passwd, /etc/group using standard
@ -17,7 +16,6 @@ import (
// useradd -r -s /bin/false <username>
var (
once sync.Once
userCommand string
cmdTemplates = map[string]string{
@ -33,6 +31,15 @@ var (
userMod = "usermod"
)
func init() {
// set up which commands are used for adding users/groups dependent on distro
if _, err := resolveBinary("adduser"); err == nil {
userCommand = "adduser"
} else if _, err := resolveBinary("useradd"); err == nil {
userCommand = "useradd"
}
}
func resolveBinary(binname string) (string, error) {
binaryPath, err := exec.LookPath(binname)
if err != nil {
@ -87,14 +94,7 @@ func AddNamespaceRangesUser(name string) (int, int, error) {
}
func addUser(userName string) error {
once.Do(func() {
// set up which commands are used for adding users/groups dependent on distro
if _, err := resolveBinary("adduser"); err == nil {
userCommand = "adduser"
} else if _, err := resolveBinary("useradd"); err == nil {
userCommand = "useradd"
}
})
if userCommand == "" {
return fmt.Errorf("Cannot add user; no useradd/adduser binary found")
}


@ -1,51 +0,0 @@
package ioutils
import (
"errors"
"io"
)
var errBufferFull = errors.New("buffer is full")
type fixedBuffer struct {
buf []byte
pos int
lastRead int
}
func (b *fixedBuffer) Write(p []byte) (int, error) {
n := copy(b.buf[b.pos:cap(b.buf)], p)
b.pos += n
if n < len(p) {
if b.pos == cap(b.buf) {
return n, errBufferFull
}
return n, io.ErrShortWrite
}
return n, nil
}
func (b *fixedBuffer) Read(p []byte) (int, error) {
n := copy(p, b.buf[b.lastRead:b.pos])
b.lastRead += n
return n, nil
}
func (b *fixedBuffer) Len() int {
return b.pos - b.lastRead
}
func (b *fixedBuffer) Cap() int {
return cap(b.buf)
}
func (b *fixedBuffer) Reset() {
b.pos = 0
b.lastRead = 0
b.buf = b.buf[:0]
}
func (b *fixedBuffer) String() string {
return string(b.buf[b.lastRead:b.pos])
}
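To make the Write contract above concrete, a small illustration written as if inside package ioutils (fixedBuffer and errBufferFull are unexported); the function name is illustrative:

func exampleFixedBuffer() {
	b := &fixedBuffer{buf: make([]byte, 0, 4)}

	// Only 4 of the 6 bytes fit; Write reports errBufferFull once the
	// write position reaches capacity.
	n, err := b.Write([]byte("abcdef")) // n == 4, err == errBufferFull
	_, _ = n, err

	p := make([]byte, 2)
	n, _ = b.Read(p) // copies "ab" and advances lastRead to 2
	_ = n
}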


@ -9,20 +9,12 @@ import (
// maxCap is the highest capacity to use in byte slices that buffer data.
const maxCap = 1e6
// minCap is the lowest capacity to use in byte slices that buffer data
const minCap = 64
// blockThreshold is the minimum number of bytes in the buffer which will cause
// a write to BytesPipe to block when allocating a new slice.
const blockThreshold = 1e6
var (
// ErrClosed is returned when Write is called on a closed BytesPipe.
ErrClosed = errors.New("write to closed BytesPipe")
bufPools = make(map[int]*sync.Pool)
bufPoolsLock sync.Mutex
)
// ErrClosed is returned when Write is called on a closed BytesPipe.
var ErrClosed = errors.New("write to closed BytesPipe")
// BytesPipe is an io.ReadWriteCloser which works similarly to a pipe (queue).
// All written data may be read at most once. Also, BytesPipe allocates
@ -31,17 +23,22 @@ var (
type BytesPipe struct {
mu sync.Mutex
wait *sync.Cond
buf []*fixedBuffer
bufLen int
closeErr error // error to return from next Read. set to nil if not closed.
buf [][]byte // slice of byte-slices of buffered data
lastRead int // index in the first slice to a read point
bufLen int // length of data buffered over the slices
closeErr error // error to return from next Read. set to nil if not closed.
}
// NewBytesPipe creates a new BytesPipe, initialized by the specified slice.
// If buf is nil, then it will be initialized with a slice whose cap is 64.
// buf will be adjusted so that len(buf) == 0 while its capacity is preserved.
func NewBytesPipe() *BytesPipe {
bp := &BytesPipe{}
bp.buf = append(bp.buf, getBuffer(minCap))
func NewBytesPipe(buf []byte) *BytesPipe {
if cap(buf) == 0 {
buf = make([]byte, 0, 64)
}
bp := &BytesPipe{
buf: [][]byte{buf[:0]},
}
bp.wait = sync.NewCond(&bp.mu)
return bp
}
@ -50,31 +47,23 @@ func NewBytesPipe() *BytesPipe {
// It can allocate new []byte slices in a process of writing.
func (bp *BytesPipe) Write(p []byte) (int, error) {
bp.mu.Lock()
defer bp.mu.Unlock()
written := 0
loop0:
for {
if bp.closeErr != nil {
bp.mu.Unlock()
return written, ErrClosed
}
if len(bp.buf) == 0 {
bp.buf = append(bp.buf, getBuffer(64))
}
// get the last buffer
// write data to the last buffer
b := bp.buf[len(bp.buf)-1]
n, err := b.Write(p)
written += n
// copy data to the current empty allocated area
n := copy(b[len(b):cap(b)], p)
// increment buffered data length
bp.bufLen += n
// include written data in last buffer
bp.buf[len(bp.buf)-1] = b[:len(b)+n]
// errBufferFull is an error we expect to get if the buffer is full
if err != nil && err != errBufferFull {
bp.wait.Broadcast()
bp.mu.Unlock()
return written, err
}
written += n
// if there was enough room to write all then break
if len(p) == n {
@ -84,7 +73,7 @@ loop0:
// more data: write to the next slice
p = p[n:]
// make sure the buffer doesn't grow too big from this write
// block if too much data is still in the buffer
for bp.bufLen >= blockThreshold {
bp.wait.Wait()
if bp.closeErr != nil {
@ -92,15 +81,15 @@ loop0:
}
}
// add new byte slice to the buffers slice and continue writing
nextCap := b.Cap() * 2
// allocate a slice that has twice the size of the last one, unless the maximum is reached
nextCap := 2 * cap(bp.buf[len(bp.buf)-1])
if nextCap > maxCap {
nextCap = maxCap
}
bp.buf = append(bp.buf, getBuffer(nextCap))
// add new byte slice to the buffers slice and continue writing
bp.buf = append(bp.buf, make([]byte, 0, nextCap))
}
bp.wait.Broadcast()
bp.mu.Unlock()
return written, nil
}
@ -122,65 +111,46 @@ func (bp *BytesPipe) Close() error {
return bp.CloseWithError(nil)
}
func (bp *BytesPipe) len() int {
return bp.bufLen - bp.lastRead
}
// Read reads bytes from BytesPipe.
// Data could be read only once.
func (bp *BytesPipe) Read(p []byte) (n int, err error) {
bp.mu.Lock()
if bp.bufLen == 0 {
defer bp.mu.Unlock()
if bp.len() == 0 {
if bp.closeErr != nil {
bp.mu.Unlock()
return 0, bp.closeErr
}
bp.wait.Wait()
if bp.bufLen == 0 && bp.closeErr != nil {
err := bp.closeErr
bp.mu.Unlock()
return 0, err
if bp.len() == 0 && bp.closeErr != nil {
return 0, bp.closeErr
}
}
for bp.bufLen > 0 {
b := bp.buf[0]
read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error
for {
read := copy(p, bp.buf[0][bp.lastRead:])
n += read
bp.bufLen -= read
if b.Len() == 0 {
// it's empty so return it to the pool and move to the next one
returnBuffer(b)
bp.buf[0] = nil
bp.buf = bp.buf[1:]
bp.lastRead += read
if bp.len() == 0 {
// we have read everything. reset to the beginning.
bp.lastRead = 0
bp.bufLen -= len(bp.buf[0])
bp.buf[0] = bp.buf[0][:0]
break
}
// break if everything was read
if len(p) == read {
break
}
// more buffered data and more asked. read from next slice.
p = p[read:]
bp.lastRead = 0
bp.bufLen -= len(bp.buf[0])
bp.buf[0] = nil // throw away old slice
bp.buf = bp.buf[1:] // switch to next
}
bp.wait.Broadcast()
bp.mu.Unlock()
return
}
func returnBuffer(b *fixedBuffer) {
b.Reset()
bufPoolsLock.Lock()
pool := bufPools[b.Cap()]
bufPoolsLock.Unlock()
if pool != nil {
pool.Put(b)
}
}
func getBuffer(size int) *fixedBuffer {
bufPoolsLock.Lock()
pool, ok := bufPools[size]
if !ok {
pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }}
bufPools[size] = pool
}
bufPoolsLock.Unlock()
return pool.Get().(*fixedBuffer)
}
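A usage sketch of the producer/consumer pattern BytesPipe supports, using the NewBytesPipe(buf []byte) signature that this change restores; the payload is illustrative:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	bp := ioutils.NewBytesPipe(nil)

	go func() {
		bp.Write([]byte("hello")) // may block once blockThreshold is exceeded
		bp.Close()
	}()

	out := make([]byte, 16)
	n, err := bp.Read(out) // blocks until data is written or the pipe is closed
	fmt.Printf("%q %v\n", out[:n], err)
}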


@ -1,82 +0,0 @@
package ioutils
import (
"io"
"io/ioutil"
"os"
"path/filepath"
)
// NewAtomicFileWriter returns a WriteCloser so that writing to it writes to a
// temporary file, and closing it atomically renames the temporary file to the
// destination path. Writing and closing concurrently is not allowed.
func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) {
f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
if err != nil {
return nil, err
}
abspath, err := filepath.Abs(filename)
if err != nil {
return nil, err
}
return &atomicFileWriter{
f: f,
fn: abspath,
perm: perm,
}, nil
}
// AtomicWriteFile atomically writes data to a file named by filename.
func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
f, err := NewAtomicFileWriter(filename, perm)
if err != nil {
return err
}
n, err := f.Write(data)
if err == nil && n < len(data) {
err = io.ErrShortWrite
f.(*atomicFileWriter).writeErr = err
}
if err1 := f.Close(); err == nil {
err = err1
}
return err
}
type atomicFileWriter struct {
f *os.File
fn string
writeErr error
perm os.FileMode
}
func (w *atomicFileWriter) Write(dt []byte) (int, error) {
n, err := w.f.Write(dt)
if err != nil {
w.writeErr = err
}
return n, err
}
func (w *atomicFileWriter) Close() (retErr error) {
defer func() {
if retErr != nil || w.writeErr != nil {
os.Remove(w.f.Name())
}
}()
if err := w.f.Sync(); err != nil {
w.f.Close()
return err
}
if err := w.f.Close(); err != nil {
return err
}
if err := os.Chmod(w.f.Name(), w.perm); err != nil {
return err
}
if w.writeErr == nil {
return os.Rename(w.f.Name(), w.fn)
}
return nil
}
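Usage is a drop-in for ioutil.WriteFile when readers must never observe a half-written file; a minimal sketch (path and contents illustrative):

package main

import (
	"log"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	// The data hits a temp file first, is fsynced and chmodded, then renamed
	// into place, so concurrent readers see either the old or the new file.
	if err := ioutils.AtomicWriteFile("/tmp/example.conf", []byte("key=value\n"), 0644); err != nil {
		log.Fatal(err)
	}
}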


@ -55,7 +55,7 @@ func HashData(src io.Reader) (string, error) {
return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
}
// OnEOFReader wraps an io.ReadCloser and a function
// OnEOFReader wraps an io.ReadCloser and a function;
// the function will run when the end of file is reached or the file is closed.
type OnEOFReader struct {
Rc io.ReadCloser


@ -0,0 +1,6 @@
// +build !gccgo
package ioutils
func callSchedulerIfNecessary() {
}


@ -0,0 +1,13 @@
// +build gccgo
package ioutils
import (
"runtime"
)
func callSchedulerIfNecessary() {
// allow or force the Go scheduler to switch context; without explicitly
// forcing this, it will hang when using the gccgo implementation
runtime.Gosched()
}

vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go generated vendored Normal file

@ -0,0 +1,40 @@
package jsonlog
import (
"encoding/json"
"fmt"
"time"
)
// JSONLog represents a log message, typically a single entry from a given log stream.
// JSONLogs can be easily serialized to and from JSON and support custom formatting.
type JSONLog struct {
// Log is the log message
Log string `json:"log,omitempty"`
// Stream is the log source
Stream string `json:"stream,omitempty"`
// Created is the created timestamp of the log
Created time.Time `json:"time"`
}
// Format returns the log formatted according to format.
// If format is the empty string, it returns the log message.
// If format is "json", it returns the log marshalled in JSON format.
// By default, it returns the log with the log time formatted according to format.
func (jl *JSONLog) Format(format string) (string, error) {
if format == "" {
return jl.Log, nil
}
if format == "json" {
m, err := json.Marshal(jl)
return string(m), err
}
return fmt.Sprintf("%s %s", jl.Created.Format(format), jl.Log), nil
}
// Reset resets the log to nil.
func (jl *JSONLog) Reset() {
jl.Log = ""
jl.Stream = ""
jl.Created = time.Time{}
}
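A short sketch of the three Format modes described above; the timestamp value is illustrative:

package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/pkg/jsonlog"
)

func main() {
	jl := &jsonlog.JSONLog{
		Log:     "hello\n",
		Stream:  "stdout",
		Created: time.Date(2016, 9, 17, 15, 50, 35, 0, time.UTC),
	}

	s, _ := jl.Format("") // raw message: "hello\n"
	fmt.Print(s)

	j, _ := jl.Format("json") // {"log":"hello\n","stream":"stdout","time":"2016-09-17T15:50:35Z"}
	fmt.Println(j)

	t, _ := jl.Format(time.RFC3339) // "2016-09-17T15:50:35Z hello\n"
	fmt.Print(t)
}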


@ -0,0 +1,178 @@
// This code was initially generated by ffjson <https://github.com/pquerna/ffjson>
// This code was generated via the following steps:
// $ go get -u github.com/pquerna/ffjson
// $ make BIND_DIR=. shell
// $ ffjson pkg/jsonlog/jsonlog.go
// $ mv pkg/jsonlog/jsonlog_ffjson.go pkg/jsonlog/jsonlog_marshalling.go
//
// It has been modified to improve the performance of time marshalling to JSON
// and to clean it up.
// Should this code need to be regenerated when the JSONLog struct is changed,
// the relevant changes which have been made are:
// import (
// "bytes"
//-
// "unicode/utf8"
// )
//
// func (mj *JSONLog) MarshalJSON() ([]byte, error) {
//@@ -20,13 +16,13 @@ func (mj *JSONLog) MarshalJSON() ([]byte, error) {
// }
// return buf.Bytes(), nil
// }
//+
// func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error {
//- var err error
//- var obj []byte
//- var first bool = true
//- _ = obj
//- _ = err
//- _ = first
//+ var (
//+ err error
//+ timestamp string
//+ first bool = true
//+ )
// buf.WriteString(`{`)
// if len(mj.Log) != 0 {
// if first == true {
//@@ -52,11 +48,11 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error {
// buf.WriteString(`,`)
// }
// buf.WriteString(`"time":`)
//- obj, err = mj.Created.MarshalJSON()
//+ timestamp, err = FastTimeMarshalJSON(mj.Created)
// if err != nil {
// return err
// }
//- buf.Write(obj)
//+ buf.WriteString(timestamp)
// buf.WriteString(`}`)
// return nil
// }
// @@ -81,9 +81,10 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error {
// if len(mj.Log) != 0 {
// - if first == true {
// - first = false
// - } else {
// - buf.WriteString(`,`)
// - }
// + first = false
// buf.WriteString(`"log":`)
// ffjsonWriteJSONString(buf, mj.Log)
// }
package jsonlog
import (
"bytes"
"unicode/utf8"
)
// MarshalJSON marshals the JSONLog.
func (mj *JSONLog) MarshalJSON() ([]byte, error) {
var buf bytes.Buffer
buf.Grow(1024)
if err := mj.MarshalJSONBuf(&buf); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
// MarshalJSONBuf marshals the JSONLog and stores the result to a bytes.Buffer.
func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error {
var (
err error
timestamp string
first = true
)
buf.WriteString(`{`)
if len(mj.Log) != 0 {
first = false
buf.WriteString(`"log":`)
ffjsonWriteJSONString(buf, mj.Log)
}
if len(mj.Stream) != 0 {
if first {
first = false
} else {
buf.WriteString(`,`)
}
buf.WriteString(`"stream":`)
ffjsonWriteJSONString(buf, mj.Stream)
}
if !first {
buf.WriteString(`,`)
}
buf.WriteString(`"time":`)
timestamp, err = FastTimeMarshalJSON(mj.Created)
if err != nil {
return err
}
buf.WriteString(timestamp)
buf.WriteString(`}`)
return nil
}
func ffjsonWriteJSONString(buf *bytes.Buffer, s string) {
const hex = "0123456789abcdef"
buf.WriteByte('"')
start := 0
for i := 0; i < len(s); {
if b := s[i]; b < utf8.RuneSelf {
if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
i++
continue
}
if start < i {
buf.WriteString(s[start:i])
}
switch b {
case '\\', '"':
buf.WriteByte('\\')
buf.WriteByte(b)
case '\n':
buf.WriteByte('\\')
buf.WriteByte('n')
case '\r':
buf.WriteByte('\\')
buf.WriteByte('r')
default:
buf.WriteString(`\u00`)
buf.WriteByte(hex[b>>4])
buf.WriteByte(hex[b&0xF])
}
i++
start = i
continue
}
c, size := utf8.DecodeRuneInString(s[i:])
if c == utf8.RuneError && size == 1 {
if start < i {
buf.WriteString(s[start:i])
}
buf.WriteString(`\ufffd`)
i += size
start = i
continue
}
if c == '\u2028' || c == '\u2029' {
if start < i {
buf.WriteString(s[start:i])
}
buf.WriteString(`\u202`)
buf.WriteByte(hex[c&0xF])
i += size
start = i
continue
}
i += size
}
if start < len(s) {
buf.WriteString(s[start:])
}
buf.WriteByte('"')
}
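The escaping rules above (quotes, control characters, and the HTML-sensitive `<`, `>`, `&`) can be observed through MarshalJSON; a sketch, with the output shown for a zero Created timestamp:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/jsonlog"
)

func main() {
	jl := &jsonlog.JSONLog{Log: "a \"quoted\" <tag> & newline\n"}
	b, _ := jl.MarshalJSON()
	fmt.Println(string(b))
	// {"log":"a \"quoted\" \u003ctag\u003e \u0026 newline\n","time":"0001-01-01T00:00:00Z"}
}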


@ -0,0 +1,122 @@
package jsonlog
import (
"bytes"
"encoding/json"
"unicode/utf8"
)
// JSONLogs is based on JSONLog.
// It allows marshalling a JSONLog whose Log field is a []byte
// and whose Created timestamp is already marshalled.
type JSONLogs struct {
Log []byte `json:"log,omitempty"`
Stream string `json:"stream,omitempty"`
Created string `json:"time"`
// json-encoded bytes
RawAttrs json.RawMessage `json:"attrs,omitempty"`
}
// MarshalJSONBuf is based on the same method from JSONLog.
// It has been modified to handle Log as raw bytes and Created as an
// already-marshalled timestamp string.
func (mj *JSONLogs) MarshalJSONBuf(buf *bytes.Buffer) error {
var first = true
buf.WriteString(`{`)
if len(mj.Log) != 0 {
first = false
buf.WriteString(`"log":`)
ffjsonWriteJSONBytesAsString(buf, mj.Log)
}
if len(mj.Stream) != 0 {
if first {
first = false
} else {
buf.WriteString(`,`)
}
buf.WriteString(`"stream":`)
ffjsonWriteJSONString(buf, mj.Stream)
}
if len(mj.RawAttrs) > 0 {
if first {
first = false
} else {
buf.WriteString(`,`)
}
buf.WriteString(`"attrs":`)
buf.Write(mj.RawAttrs)
}
if !first {
buf.WriteString(`,`)
}
buf.WriteString(`"time":`)
buf.WriteString(mj.Created)
buf.WriteString(`}`)
return nil
}
// ffjsonWriteJSONBytesAsString is based on ffjsonWriteJSONString. It has been
// changed to accept a string passed as a slice of bytes.
func ffjsonWriteJSONBytesAsString(buf *bytes.Buffer, s []byte) {
const hex = "0123456789abcdef"
buf.WriteByte('"')
start := 0
for i := 0; i < len(s); {
if b := s[i]; b < utf8.RuneSelf {
if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
i++
continue
}
if start < i {
buf.Write(s[start:i])
}
switch b {
case '\\', '"':
buf.WriteByte('\\')
buf.WriteByte(b)
case '\n':
buf.WriteByte('\\')
buf.WriteByte('n')
case '\r':
buf.WriteByte('\\')
buf.WriteByte('r')
default:
buf.WriteString(`\u00`)
buf.WriteByte(hex[b>>4])
buf.WriteByte(hex[b&0xF])
}
i++
start = i
continue
}
c, size := utf8.DecodeRune(s[i:])
if c == utf8.RuneError && size == 1 {
if start < i {
buf.Write(s[start:i])
}
buf.WriteString(`\ufffd`)
i += size
start = i
continue
}
if c == '\u2028' || c == '\u2029' {
if start < i {
buf.Write(s[start:i])
}
buf.WriteString(`\u202`)
buf.WriteByte(hex[c&0xF])
i += size
start = i
continue
}
i += size
}
if start < len(s) {
buf.Write(s[start:])
}
buf.WriteByte('"')
}


@ -0,0 +1,27 @@
// Package jsonlog provides helper functions to parse and print time (time.Time) as JSON.
package jsonlog
import (
"errors"
"time"
)
const (
// RFC3339NanoFixed is our own version of RFC3339Nano because we want one
// that pads the nanoseconds part with zeros to ensure
// the timestamps are aligned in the logs.
RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
// JSONFormat is the format used by FastTimeMarshalJSON
JSONFormat = `"` + time.RFC3339Nano + `"`
)
// FastTimeMarshalJSON avoids one of the extra allocations that
// time.MarshalJSON makes.
func FastTimeMarshalJSON(t time.Time) (string, error) {
if y := t.Year(); y < 0 || y >= 10000 {
// RFC 3339 is clear that years are 4 digits exactly.
// See golang.org/issue/4556#c15 for more discussion.
return "", errors.New("time.MarshalJSON: year outside of range [0,9999]")
}
return t.Format(JSONFormat), nil
}
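A usage sketch; note the returned string already includes the surrounding quotes, per JSONFormat:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/docker/docker/pkg/jsonlog"
)

func main() {
	t := time.Date(2016, 9, 17, 15, 50, 35, 123456789, time.UTC)
	s, err := jsonlog.FastTimeMarshalJSON(t)
	if err != nil {
		log.Fatal(err) // only fails for years outside [0,9999]
	}
	fmt.Println(s) // "2016-09-17T15:50:35.123456789Z"
}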


@ -0,0 +1,221 @@
package jsonmessage
import (
"encoding/json"
"fmt"
"io"
"strings"
"time"
"github.com/docker/docker/pkg/jsonlog"
"github.com/docker/docker/pkg/term"
"github.com/docker/go-units"
)
// JSONError wraps a concrete Code and Message: `Code` is
// an integer error code, `Message` is the error message.
type JSONError struct {
Code int `json:"code,omitempty"`
Message string `json:"message,omitempty"`
}
func (e *JSONError) Error() string {
return e.Message
}
// JSONProgress describes a Progress. terminalFd is the fd of the current terminal,
// Start is the initial value for the operation. Current is the current status and
// value of the progress made towards Total. Total is the end value describing when
// we made 100% progress for an operation.
type JSONProgress struct {
terminalFd uintptr
Current int64 `json:"current,omitempty"`
Total int64 `json:"total,omitempty"`
Start int64 `json:"start,omitempty"`
}
func (p *JSONProgress) String() string {
var (
width = 200
pbBox string
numbersBox string
timeLeftBox string
)
ws, err := term.GetWinsize(p.terminalFd)
if err == nil {
width = int(ws.Width)
}
if p.Current <= 0 && p.Total <= 0 {
return ""
}
current := units.HumanSize(float64(p.Current))
if p.Total <= 0 {
return fmt.Sprintf("%8v", current)
}
total := units.HumanSize(float64(p.Total))
percentage := int(float64(p.Current)/float64(p.Total)*100) / 2
if percentage > 50 {
percentage = 50
}
if width > 110 {
// this number can't be negative (see gh#7136)
numSpaces := 0
if 50-percentage > 0 {
numSpaces = 50 - percentage
}
pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces))
}
numbersBox = fmt.Sprintf("%8v/%v", current, total)
if p.Current > p.Total {
// remove total display if the reported current is wonky.
numbersBox = fmt.Sprintf("%8v", current)
}
if p.Current > 0 && p.Start > 0 && percentage < 50 {
fromStart := time.Now().UTC().Sub(time.Unix(p.Start, 0))
perEntry := fromStart / time.Duration(p.Current)
left := time.Duration(p.Total-p.Current) * perEntry
left = (left / time.Second) * time.Second
if width > 50 {
timeLeftBox = " " + left.String()
}
}
return pbBox + numbersBox + timeLeftBox
}
// JSONMessage defines a message struct. It describes
// the created time, where it comes from, the status, and the ID of the
// message. It's used for docker events.
type JSONMessage struct {
Stream string `json:"stream,omitempty"`
Status string `json:"status,omitempty"`
Progress *JSONProgress `json:"progressDetail,omitempty"`
ProgressMessage string `json:"progress,omitempty"` //deprecated
ID string `json:"id,omitempty"`
From string `json:"from,omitempty"`
Time int64 `json:"time,omitempty"`
TimeNano int64 `json:"timeNano,omitempty"`
Error *JSONError `json:"errorDetail,omitempty"`
ErrorMessage string `json:"error,omitempty"` //deprecated
// Aux contains out-of-band data, such as digests for push signing.
Aux *json.RawMessage `json:"aux,omitempty"`
}
// Display displays the JSONMessage to `out`. `isTerminal` describes if `out`
// is a terminal. If this is the case, it will erase the entire current line
// when displaying the progressbar.
func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error {
if jm.Error != nil {
if jm.Error.Code == 401 {
return fmt.Errorf("Authentication is required.")
}
return jm.Error
}
var endl string
if isTerminal && jm.Stream == "" && jm.Progress != nil {
// <ESC>[2K = erase entire current line
fmt.Fprintf(out, "%c[2K\r", 27)
endl = "\r"
} else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal
return nil
}
if jm.TimeNano != 0 {
fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(jsonlog.RFC3339NanoFixed))
} else if jm.Time != 0 {
fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(jsonlog.RFC3339NanoFixed))
}
if jm.ID != "" {
fmt.Fprintf(out, "%s: ", jm.ID)
}
if jm.From != "" {
fmt.Fprintf(out, "(from %s) ", jm.From)
}
if jm.Progress != nil && isTerminal {
fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl)
} else if jm.ProgressMessage != "" { //deprecated
fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl)
} else if jm.Stream != "" {
fmt.Fprintf(out, "%s%s", jm.Stream, endl)
} else {
fmt.Fprintf(out, "%s%s\n", jm.Status, endl)
}
return nil
}
// DisplayJSONMessagesStream displays a JSON message stream from `in` to `out`.
// `isTerminal` describes whether `out` is a terminal. If this is the case, it will print `\n` at the end of
// each line and move the cursor while displaying.
func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(*json.RawMessage)) error {
var (
dec = json.NewDecoder(in)
ids = make(map[string]int)
)
for {
diff := 0
var jm JSONMessage
if err := dec.Decode(&jm); err != nil {
if err == io.EOF {
break
}
return err
}
if jm.Aux != nil {
if auxCallback != nil {
auxCallback(jm.Aux)
}
continue
}
if jm.Progress != nil {
jm.Progress.terminalFd = terminalFd
}
if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") {
line, ok := ids[jm.ID]
if !ok {
// NOTE: This approach of using len(id) to
// figure out the number of lines of history
// only works as long as we clear the history
// when we output something that's not
// accounted for in the map, such as a line
// with no ID.
line = len(ids)
ids[jm.ID] = line
if isTerminal {
fmt.Fprintf(out, "\n")
}
} else {
diff = len(ids) - line
}
if isTerminal {
// NOTE: this appears to be necessary even if
// diff == 0.
// <ESC>[{diff}A = move cursor up diff rows
fmt.Fprintf(out, "%c[%dA", 27, diff)
}
} else {
// When outputting something that isn't progress
// output, clear the history of previous lines. We
// don't want progress entries from some previous
// operation to be updated (for example, pull -a
// with multiple tags).
ids = make(map[string]int)
}
err := jm.Display(out, isTerminal)
if jm.ID != "" && isTerminal {
// NOTE: this appears to be necessary even if
// diff == 0.
// <ESC>[{diff}B = move cursor down diff rows
fmt.Fprintf(out, "%c[%dB", 27, diff)
}
if err != nil {
return err
}
}
return nil
}
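A sketch of driving the stream display with a small pre-recorded stream; a non-terminal writer is used, so progress bars are suppressed (the status strings and IDs are illustrative):

package main

import (
	"log"
	"os"
	"strings"

	"github.com/docker/docker/pkg/jsonmessage"
)

func main() {
	stream := strings.NewReader(
		`{"status":"Pulling from library/busybox","id":"latest"}` + "\n" +
			`{"status":"Download complete","id":"8ddc19f16526"}` + "\n")

	// terminalFd is unused when isTerminal is false; no aux callback needed.
	if err := jsonmessage.DisplayJSONMessagesStream(stream, os.Stdout, 0, false, nil); err != nil {
		log.Fatal(err)
	}
}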


@ -5,112 +5,6 @@ import (
"strings"
)
var flags = map[string]struct {
clear bool
flag int
}{
"defaults": {false, 0},
"ro": {false, RDONLY},
"rw": {true, RDONLY},
"suid": {true, NOSUID},
"nosuid": {false, NOSUID},
"dev": {true, NODEV},
"nodev": {false, NODEV},
"exec": {true, NOEXEC},
"noexec": {false, NOEXEC},
"sync": {false, SYNCHRONOUS},
"async": {true, SYNCHRONOUS},
"dirsync": {false, DIRSYNC},
"remount": {false, REMOUNT},
"mand": {false, MANDLOCK},
"nomand": {true, MANDLOCK},
"atime": {true, NOATIME},
"noatime": {false, NOATIME},
"diratime": {true, NODIRATIME},
"nodiratime": {false, NODIRATIME},
"bind": {false, BIND},
"rbind": {false, RBIND},
"unbindable": {false, UNBINDABLE},
"runbindable": {false, RUNBINDABLE},
"private": {false, PRIVATE},
"rprivate": {false, RPRIVATE},
"shared": {false, SHARED},
"rshared": {false, RSHARED},
"slave": {false, SLAVE},
"rslave": {false, RSLAVE},
"relatime": {false, RELATIME},
"norelatime": {true, RELATIME},
"strictatime": {false, STRICTATIME},
"nostrictatime": {true, STRICTATIME},
}
var validFlags = map[string]bool{
"": true,
"size": true,
"mode": true,
"uid": true,
"gid": true,
"nr_inodes": true,
"nr_blocks": true,
"mpol": true,
}
var propagationFlags = map[string]bool{
"bind": true,
"rbind": true,
"unbindable": true,
"runbindable": true,
"private": true,
"rprivate": true,
"shared": true,
"rshared": true,
"slave": true,
"rslave": true,
}
// MergeTmpfsOptions merges mount options to make sure there are no duplicates.
func MergeTmpfsOptions(options []string) ([]string, error) {
// We use collisions maps to remove duplicates.
// For flag, the key is the flag value (the key for propagation flag is -1)
// For data=value, the key is the data
flagCollisions := map[int]bool{}
dataCollisions := map[string]bool{}
var newOptions []string
// We process in reverse order
for i := len(options) - 1; i >= 0; i-- {
option := options[i]
if option == "defaults" {
continue
}
if f, ok := flags[option]; ok && f.flag != 0 {
// There is only one propagation mode
key := f.flag
if propagationFlags[option] {
key = -1
}
// Check to see if there is collision for flag
if !flagCollisions[key] {
// We prepend the option and add to collision map
newOptions = append([]string{option}, newOptions...)
flagCollisions[key] = true
}
continue
}
opt := strings.SplitN(option, "=", 2)
if len(opt) != 2 || !validFlags[opt[0]] {
return nil, fmt.Errorf("Invalid tmpfs option %q", opt)
}
if !dataCollisions[opt[0]] {
// We prepend the option and add to collision map
newOptions = append([]string{option}, newOptions...)
dataCollisions[opt[0]] = true
}
}
return newOptions, nil
}
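The MergeTmpfsOptions helper removed above deduplicates from the right, so later options win; a sketch of its pre-change behaviour (the import path reflects the vendored package before this commit):

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/mount"
)

func main() {
	// "exec" and "size=128m" appear last, so they override the earlier
	// "noexec" and "size=64m".
	merged, err := mount.MergeTmpfsOptions([]string{"noexec", "size=64m", "exec", "size=128m"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(merged) // [exec size=128m]
}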
// Parse fstab type mount options into mount() flags
// and device specific data
func parseOptions(options string) (int, string) {
@ -119,6 +13,45 @@ func parseOptions(options string) (int, string) {
data []string
)
flags := map[string]struct {
clear bool
flag int
}{
"defaults": {false, 0},
"ro": {false, RDONLY},
"rw": {true, RDONLY},
"suid": {true, NOSUID},
"nosuid": {false, NOSUID},
"dev": {true, NODEV},
"nodev": {false, NODEV},
"exec": {true, NOEXEC},
"noexec": {false, NOEXEC},
"sync": {false, SYNCHRONOUS},
"async": {true, SYNCHRONOUS},
"dirsync": {false, DIRSYNC},
"remount": {false, REMOUNT},
"mand": {false, MANDLOCK},
"nomand": {true, MANDLOCK},
"atime": {true, NOATIME},
"noatime": {false, NOATIME},
"diratime": {true, NODIRATIME},
"nodiratime": {false, NODIRATIME},
"bind": {false, BIND},
"rbind": {false, RBIND},
"unbindable": {false, UNBINDABLE},
"runbindable": {false, RUNBINDABLE},
"private": {false, PRIVATE},
"rprivate": {false, RPRIVATE},
"shared": {false, SHARED},
"rshared": {false, RSHARED},
"slave": {false, SLAVE},
"rslave": {false, RSLAVE},
"relatime": {false, RELATIME},
"norelatime": {true, RELATIME},
"strictatime": {false, STRICTATIME},
"nostrictatime": {true, STRICTATIME},
}
for _, o := range strings.Split(options, ",") {
// If the option does not exist in the flags table or the flag
// is not supported on the platform,
@ -139,6 +72,16 @@ func parseOptions(options string) (int, string) {
// ParseTmpfsOptions parses fstab type mount options into flags and data
func ParseTmpfsOptions(options string) (int, string, error) {
flags, data := parseOptions(options)
validFlags := map[string]bool{
"": true,
"size": true,
"mode": true,
"uid": true,
"gid": true,
"nr_inodes": true,
"nr_blocks": true,
"mpol": true,
}
for _, o := range strings.Split(data, ",") {
opt := strings.SplitN(o, "=", 2)
if !validFlags[opt[0]] {


@ -1,4 +1,4 @@
// +build !linux,!freebsd freebsd,!cgo solaris,!cgo
// +build !linux,!freebsd freebsd,!cgo
package mount


@ -9,8 +9,8 @@ func GetMounts() ([]*Info, error) {
return parseMountTable()
}
// Mounted determines if a specified mountpoint has been mounted.
// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab.
// Mounted looks at /proc/self/mountinfo to determine if the specified
// mountpoint has been mounted
func Mounted(mountpoint string) (bool, error) {
entries, err := parseMountTable()
if err != nil {


@ -1,33 +0,0 @@
// +build solaris,cgo
package mount
import (
"golang.org/x/sys/unix"
"unsafe"
)
// #include <stdlib.h>
// #include <stdio.h>
// #include <sys/mount.h>
// int Mount(const char *spec, const char *dir, int mflag,
// char *fstype, char *dataptr, int datalen, char *optptr, int optlen) {
// return mount(spec, dir, mflag, fstype, dataptr, datalen, optptr, optlen);
// }
import "C"
func mount(device, target, mType string, flag uintptr, data string) error {
spec := C.CString(device)
dir := C.CString(target)
fstype := C.CString(mType)
_, err := C.Mount(spec, dir, C.int(flag), fstype, nil, 0, nil, 0)
C.free(unsafe.Pointer(spec))
C.free(unsafe.Pointer(dir))
C.free(unsafe.Pointer(fstype))
return err
}
func unmount(target string, flag int) error {
err := unix.Unmount(target, flag)
return err
}


@ -1,4 +1,4 @@
// +build !linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo
// +build !linux,!freebsd freebsd,!cgo
package mount


@ -1,37 +0,0 @@
// +build solaris,cgo
package mount
/*
#include <stdio.h>
#include <sys/mnttab.h>
*/
import "C"
import (
"fmt"
)
func parseMountTable() ([]*Info, error) {
mnttab := C.fopen(C.CString(C.MNTTAB), C.CString("r"))
if mnttab == nil {
return nil, fmt.Errorf("Failed to open %s", C.MNTTAB)
}
var out []*Info
var mp C.struct_mnttab
ret := C.getmntent(mnttab, &mp)
for ret == 0 {
var mountinfo Info
mountinfo.Mountpoint = C.GoString(mp.mnt_mountp)
mountinfo.Source = C.GoString(mp.mnt_special)
mountinfo.Fstype = C.GoString(mp.mnt_fstype)
mountinfo.Opts = C.GoString(mp.mnt_mntopts)
out = append(out, &mountinfo)
ret = C.getmntent(mnttab, &mp)
}
C.fclose(mnttab)
return out, nil
}


@ -1,4 +1,4 @@
// +build !windows,!linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo
// +build !windows,!linux,!freebsd freebsd,!cgo
package mount


@ -20,16 +20,14 @@ const (
)
// NewClient creates a new plugin client (http).
func NewClient(addr string, tlsConfig *tlsconfig.Options) (*Client, error) {
func NewClient(addr string, tlsConfig tlsconfig.Options) (*Client, error) {
tr := &http.Transport{}
if tlsConfig != nil {
c, err := tlsconfig.Client(*tlsConfig)
if err != nil {
return nil, err
}
tr.TLSClientConfig = c
c, err := tlsconfig.Client(tlsConfig)
if err != nil {
return nil, err
}
tr.TLSClientConfig = c
u, err := url.Parse(addr)
if err != nil {
@ -132,7 +130,7 @@ func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool)
return nil, err
}
retries++
logrus.Warnf("Unable to connect to plugin: %s%s: %v, retrying in %v", req.URL.Host, req.URL.Path, err, timeOff)
logrus.Warnf("Unable to connect to plugin: %s:%s, retrying in %v", req.URL.Host, req.URL.Path, timeOff)
time.Sleep(timeOff)
continue
}


@ -64,7 +64,7 @@ func (l *localRegistry) Plugin(name string) (*Plugin, error) {
for _, p := range socketpaths {
if fi, err := os.Stat(p); err == nil && fi.Mode()&os.ModeSocket != 0 {
return NewLocalPlugin(name, "unix://"+p), nil
return newLocalPlugin(name, "unix://"+p), nil
}
}
@ -101,7 +101,7 @@ func readPluginInfo(name, path string) (*Plugin, error) {
return nil, fmt.Errorf("Unknown protocol")
}
return NewLocalPlugin(name, addr), nil
return newLocalPlugin(name, addr), nil
}
func readPluginJSONInfo(name, path string) (*Plugin, error) {
@ -115,7 +115,7 @@ func readPluginJSONInfo(name, path string) (*Plugin, error) {
if err := json.NewDecoder(f).Decode(&p); err != nil {
return nil, err
}
p.name = name
p.Name = name
if len(p.TLSConfig.CAFile) == 0 {
p.TLSConfig.InsecureSkipVerify = true
}


@ -55,41 +55,29 @@ type Manifest struct {
// Plugin is the definition of a docker plugin.
type Plugin struct {
// Name of the plugin
name string
Name string `json:"-"`
// Address of the plugin
Addr string
// TLS configuration of the plugin
TLSConfig *tlsconfig.Options
TLSConfig tlsconfig.Options
// Client attached to the plugin
client *Client
Client *Client `json:"-"`
// Manifest of the plugin (see above)
Manifest *Manifest `json:"-"`
// error produced by activation
activateErr error
// specifies if the activation sequence is completed (not if it is successful or not)
// specifies if the activation sequence is completed (not if it is successful or not)
activated bool
// wait for activation to finish
activateWait *sync.Cond
}
// Name returns the name of the plugin.
func (p *Plugin) Name() string {
return p.name
}
// Client returns a ready-to-use plugin client that can be used to communicate with the plugin.
func (p *Plugin) Client() *Client {
return p.client
}
// NewLocalPlugin creates a new local plugin.
func NewLocalPlugin(name, addr string) *Plugin {
func newLocalPlugin(name, addr string) *Plugin {
return &Plugin{
name: name,
Addr: addr,
// TODO: change to nil
TLSConfig: &tlsconfig.Options{InsecureSkipVerify: true},
Name: name,
Addr: addr,
TLSConfig: tlsconfig.Options{InsecureSkipVerify: true},
activateWait: sync.NewCond(&sync.Mutex{}),
}
}
@ -114,10 +102,10 @@ func (p *Plugin) activateWithLock() error {
if err != nil {
return err
}
p.client = c
p.Client = c
m := new(Manifest)
if err = p.client.Call("Plugin.Activate", nil, m); err != nil {
if err = p.Client.Call("Plugin.Activate", nil, m); err != nil {
return err
}
@ -128,7 +116,7 @@ func (p *Plugin) activateWithLock() error {
if !handled {
continue
}
handler(p.name, p.client)
handler(p.Name, p.Client)
}
return nil
}


@ -28,7 +28,7 @@ const buffer32K = 32 * 1024
// BufioReaderPool is a bufio reader that uses sync.Pool.
type BufioReaderPool struct {
pool *sync.Pool
pool sync.Pool
}
func init() {
@ -39,7 +39,7 @@ func init() {
// newBufioReaderPoolWithSize is unexported because new pools should be
// added here to be shared where required.
func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
pool := &sync.Pool{
pool := sync.Pool{
New: func() interface{} { return bufio.NewReaderSize(nil, size) },
}
return &BufioReaderPool{pool: pool}
@ -80,13 +80,13 @@ func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Rea
// BufioWriterPool is a bufio writer that uses sync.Pool.
type BufioWriterPool struct {
pool *sync.Pool
pool sync.Pool
}
// newBufioWriterPoolWithSize is unexported because new pools should be
// added here to be shared where required.
func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
pool := &sync.Pool{
pool := sync.Pool{
New: func() interface{} { return bufio.NewWriterSize(nil, size) },
}
return &BufioWriterPool{pool: pool}


@ -1,5 +0,0 @@
## reexec
The `reexec` package facilitates the busybox-style re-exec of the docker binary that we require because
of the forking limitations of using Go. Handlers can be registered with a name, and the argv[0] of
the exec'd binary will be used to find and execute custom init paths.


@ -1,4 +1,4 @@
// +build freebsd solaris
// +build freebsd
package reexec


@ -1,4 +1,4 @@
// +build !linux,!windows,!freebsd,!solaris
// +build !linux,!windows,!freebsd
package reexec


@ -12,7 +12,7 @@ var registeredInitializers = make(map[string]func())
// Register adds an initialization func under the specified name
func Register(name string, initializer func()) {
if _, exists := registeredInitializers[name]; exists {
panic(fmt.Sprintf("reexec func already registered under name %q", name))
panic(fmt.Sprintf("reexec func already registered under name %q", name))
}
registeredInitializers[name] = initializer

vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go generated vendored Normal file

@ -0,0 +1,178 @@
package stdcopy
import (
"encoding/binary"
"errors"
"fmt"
"io"
"github.com/Sirupsen/logrus"
)
// StdType is the type of standard stream
// a writer can multiplex to.
type StdType byte
const (
// Stdin represents standard input stream type.
Stdin StdType = iota
// Stdout represents standard output stream type.
Stdout
// Stderr represents standard error stream type.
Stderr
stdWriterPrefixLen = 8
stdWriterFdIndex = 0
stdWriterSizeIndex = 4
startingBufLen = 32*1024 + stdWriterPrefixLen + 1
)
// stdWriter is a wrapper of io.Writer with extra customized info.
type stdWriter struct {
io.Writer
prefix byte
}
// Write sends the buffer to the underlying writer.
// It inserts the prefix header before the buffer,
// so stdcopy.StdCopy knows where to multiplex the output.
// It makes stdWriter implement io.Writer.
func (w *stdWriter) Write(buf []byte) (n int, err error) {
if w == nil || w.Writer == nil {
return 0, errors.New("Writer not instantiated")
}
if buf == nil {
return 0, nil
}
header := [stdWriterPrefixLen]byte{stdWriterFdIndex: w.prefix}
binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(buf)))
line := append(header[:], buf...)
n, err = w.Writer.Write(line)
n -= stdWriterPrefixLen
if n < 0 {
n = 0
}
return
}
// NewStdWriter instantiates a new Writer.
// Everything written to it will be encapsulated using a custom format,
// and written to the underlying `w` stream.
// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection.
// `t` indicates the id of the stream to encapsulate.
// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr.
func NewStdWriter(w io.Writer, t StdType) io.Writer {
return &stdWriter{
Writer: w,
prefix: byte(t),
}
}
// StdCopy is a modified version of io.Copy.
//
// StdCopy will demultiplex `src`, assuming that it contains two streams,
// previously multiplexed together using a StdWriter instance.
// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`.
//
// StdCopy will read until it hits EOF on `src`. It will then return a nil error.
// In other words: if `err` is non-nil, it indicates a real underlying error.
//
// `written` will hold the total number of bytes written to `dstout` and `dsterr`.
func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) {
var (
buf = make([]byte, startingBufLen)
bufLen = len(buf)
nr, nw int
er, ew error
out io.Writer
frameSize int
)
for {
// Make sure we have at least a full header
for nr < stdWriterPrefixLen {
var nr2 int
nr2, er = src.Read(buf[nr:])
nr += nr2
if er == io.EOF {
if nr < stdWriterPrefixLen {
logrus.Debugf("Corrupted prefix: %v", buf[:nr])
return written, nil
}
break
}
if er != nil {
logrus.Debugf("Error reading header: %s", er)
return 0, er
}
}
// Check the first byte to know where to write
switch StdType(buf[stdWriterFdIndex]) {
case Stdin:
fallthrough
case Stdout:
// Write on stdout
out = dstout
case Stderr:
// Write on stderr
out = dsterr
default:
logrus.Debugf("Error selecting output fd: (%d)", buf[stdWriterFdIndex])
return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex])
}
// Retrieve the size of the frame
frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4]))
logrus.Debugf("framesize: %d", frameSize)
// Check if the buffer is big enough to read the frame.
// Extend it if necessary.
if frameSize+stdWriterPrefixLen > bufLen {
logrus.Debugf("Extending buffer cap by %d (was %d)", frameSize+stdWriterPrefixLen-bufLen+1, len(buf))
buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...)
bufLen = len(buf)
}
// While the amount of bytes read is less than the size of the frame + header, we keep reading
for nr < frameSize+stdWriterPrefixLen {
var nr2 int
nr2, er = src.Read(buf[nr:])
nr += nr2
if er == io.EOF {
if nr < frameSize+stdWriterPrefixLen {
logrus.Debugf("Corrupted frame: %v", buf[stdWriterPrefixLen:nr])
return written, nil
}
break
}
if er != nil {
logrus.Debugf("Error reading frame: %s", er)
return 0, er
}
}
// Write the retrieved frame (without header)
nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen])
if ew != nil {
logrus.Debugf("Error writing frame: %s", ew)
return 0, ew
}
// If the frame has not been fully written: error
if nw != frameSize {
logrus.Debugf("Error Short Write: (%d on %d)", nw, frameSize)
return 0, io.ErrShortWrite
}
written += int64(nw)
// Move the rest of the buffer to the beginning
copy(buf, buf[frameSize+stdWriterPrefixLen:])
// Move the index
nr -= frameSize + stdWriterPrefixLen
}
}
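A round-trip sketch: multiplex stdout and stderr into one buffer with NewStdWriter, then demultiplex with StdCopy; payloads are illustrative:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	var muxed bytes.Buffer

	// Each Write is framed with the 8-byte header described above.
	stdcopy.NewStdWriter(&muxed, stdcopy.Stdout).Write([]byte("to stdout\n"))
	stdcopy.NewStdWriter(&muxed, stdcopy.Stderr).Write([]byte("to stderr\n"))

	var stdout, stderr bytes.Buffer
	if _, err := stdcopy.StdCopy(&stdout, &stderr, &muxed); err != nil {
		log.Fatal(err)
	}
	fmt.Print(stdout.String()) // to stdout
	fmt.Print(stderr.String()) // to stderr
}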


@ -1 +0,0 @@
This package provides helper functions for dealing with string identifiers


@ -24,7 +24,7 @@ func IsShortID(id string) bool {
// TruncateID returns a shorthand version of a string identifier for convenience.
// A collision with other shorthands is very unlikely, but possible.
// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller
// will need to use a longer prefix, or the full-length Id.
// will need to use a longer prefix, or the full-length Id.
func TruncateID(id string) string {
if i := strings.IndexRune(id, ':'); i >= 0 {
id = id[i+1:]
@ -57,7 +57,7 @@ func generateID(crypto bool) string {
}
}
// GenerateRandomID returns a unique id.
// GenerateRandomID returns a unique id.
func GenerateRandomID() string {
return generateID(true)


@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2014-2016 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@ -0,0 +1,27 @@
Copyright (c) 2014-2016 The Docker & Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/github.com/docker/docker/pkg/symlink/fs.go generated vendored Normal file

@ -0,0 +1,143 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE.BSD file.
// This code is a modified version of path/filepath/symlink.go from the Go standard library.
package symlink
import (
"bytes"
"errors"
"os"
"path/filepath"
"strings"
"github.com/docker/docker/pkg/system"
)
// FollowSymlinkInScope is a wrapper around evalSymlinksInScope that returns an
// absolute path. This function handles paths in a platform-agnostic manner.
func FollowSymlinkInScope(path, root string) (string, error) {
path, err := filepath.Abs(filepath.FromSlash(path))
if err != nil {
return "", err
}
root, err = filepath.Abs(filepath.FromSlash(root))
if err != nil {
return "", err
}
return evalSymlinksInScope(path, root)
}
// evalSymlinksInScope will evaluate symlinks in `path` within a scope `root` and return
// a result guaranteed to be contained within the scope `root`, at the time of the call.
// Symlinks in `root` are not evaluated and left as-is.
// Errors encountered while attempting to evaluate symlinks in path will be returned.
// Non-existing paths are valid and do not constitute an error.
// `path` has to contain `root` as a prefix, or else an error will be returned.
// Trying to break out from `root` does not constitute an error.
//
// Example:
// If /foo/bar -> /outside,
// FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/outside"
//
// IMPORTANT: it is the caller's responsibility to call evalSymlinksInScope *after* relevant symlinks
// are created, and not to subsequently create additional symlinks that could potentially make a
// previously-safe path unsafe. Example: if /foo/bar does not exist, evalSymlinksInScope("/foo/bar", "/foo")
// would return "/foo/bar". If one makes /foo/bar a symlink to /baz subsequently, then "/foo/bar" should
// no longer be considered safely contained in "/foo".
func evalSymlinksInScope(path, root string) (string, error) {
root = filepath.Clean(root)
if path == root {
return path, nil
}
if !strings.HasPrefix(path, root) {
return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root)
}
const maxIter = 255
originalPath := path
// given root of "/a" and path of "/a/b/../../c" we want path to be "/b/../../c"
path = path[len(root):]
if root == string(filepath.Separator) {
path = string(filepath.Separator) + path
}
if !strings.HasPrefix(path, string(filepath.Separator)) {
return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root)
}
path = filepath.Clean(path)
// consume path by taking each frontmost path element,
// expanding it if it's a symlink, and appending it to b
var b bytes.Buffer
// b here will always be considered to be the "current absolute path inside
// root" when we append paths to it, we also append a slash and use
// filepath.Clean after the loop to trim the trailing slash
for n := 0; path != ""; n++ {
if n > maxIter {
return "", errors.New("evalSymlinksInScope: too many links in " + originalPath)
}
// find next path component, p
i := strings.IndexRune(path, filepath.Separator)
var p string
if i == -1 {
p, path = path, ""
} else {
p, path = path[:i], path[i+1:]
}
if p == "" {
continue
}
// this takes a b.String() like "b/../" and a p like "c" and turns it
// into "/b/../c" which then gets filepath.Cleaned into "/c" and then
// root gets prepended and we Clean again (to remove any trailing slash
// if the first Clean gave us just "/")
cleanP := filepath.Clean(string(filepath.Separator) + b.String() + p)
if cleanP == string(filepath.Separator) {
// never Lstat "/" itself
b.Reset()
continue
}
fullP := filepath.Clean(root + cleanP)
fi, err := os.Lstat(fullP)
if os.IsNotExist(err) {
// if p does not exist, accept it
b.WriteString(p)
b.WriteRune(filepath.Separator)
continue
}
if err != nil {
return "", err
}
if fi.Mode()&os.ModeSymlink == 0 {
b.WriteString(p + string(filepath.Separator))
continue
}
// it's a symlink, put it at the front of path
dest, err := os.Readlink(fullP)
if err != nil {
return "", err
}
if system.IsAbs(dest) {
b.Reset()
}
path = dest + string(filepath.Separator) + path
}
// see note above on "fullP := ..." for why this is double-cleaned and
// what's happening here
return filepath.Clean(root + filepath.Clean(string(filepath.Separator)+b.String())), nil
}
// EvalSymlinks returns the path name after the evaluation of any symbolic
// links.
// If path is relative the result will be relative to the current directory,
// unless one of the components is an absolute symbolic link.
// This version has been updated to support long paths prepended with `\\?\`.
func EvalSymlinks(path string) (string, error) {
return evalSymlinks(path)
}
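
For illustration, a minimal sketch (not part of the vendored file) of calling FollowSymlinkInScope; the paths are hypothetical. Even when a component such as `current` is a symlink whose target escapes the scope root, the result is re-rooted inside it:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/symlink"
)

func main() {
	// Hypothetical layout: /var/lib/app/data/current -> /etc
	// The escape is contained: the result stays under /var/lib/app.
	resolved, err := symlink.FollowSymlinkInScope("/var/lib/app/data/current", "/var/lib/app")
	if err != nil {
		fmt.Println("resolve failed:", err)
		return
	}
	fmt.Println(resolved) // e.g. /var/lib/app/etc
}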

11
vendor/github.com/docker/docker/pkg/symlink/fs_unix.go generated vendored Normal file
View file

@@ -0,0 +1,11 @@
// +build !windows

package symlink

import (
	"path/filepath"
)

func evalSymlinks(path string) (string, error) {
	return filepath.EvalSymlinks(path)
}

155
vendor/github.com/docker/docker/pkg/symlink/fs_windows.go generated vendored Normal file
View file

@@ -0,0 +1,155 @@
package symlink

import (
	"bytes"
	"errors"
	"os"
	"path/filepath"
	"strings"
	"syscall"

	"github.com/docker/docker/pkg/longpath"
)

func toShort(path string) (string, error) {
	p, err := syscall.UTF16FromString(path)
	if err != nil {
		return "", err
	}
	b := p // GetShortPathName says we can reuse buffer
	n, err := syscall.GetShortPathName(&p[0], &b[0], uint32(len(b)))
	if err != nil {
		return "", err
	}
	if n > uint32(len(b)) {
		b = make([]uint16, n)
		if _, err = syscall.GetShortPathName(&p[0], &b[0], uint32(len(b))); err != nil {
			return "", err
		}
	}
	return syscall.UTF16ToString(b), nil
}

func toLong(path string) (string, error) {
	p, err := syscall.UTF16FromString(path)
	if err != nil {
		return "", err
	}
	b := p // GetLongPathName says we can reuse buffer
	n, err := syscall.GetLongPathName(&p[0], &b[0], uint32(len(b)))
	if err != nil {
		return "", err
	}
	if n > uint32(len(b)) {
		b = make([]uint16, n)
		n, err = syscall.GetLongPathName(&p[0], &b[0], uint32(len(b)))
		if err != nil {
			return "", err
		}
	}
	b = b[:n]
	return syscall.UTF16ToString(b), nil
}

func evalSymlinks(path string) (string, error) {
	path, err := walkSymlinks(path)
	if err != nil {
		return "", err
	}

	p, err := toShort(path)
	if err != nil {
		return "", err
	}
	p, err = toLong(p)
	if err != nil {
		return "", err
	}
	// syscall.GetLongPathName does not change the case of the drive letter,
	// but the result of EvalSymlinks must be unique, so we have
	// EvalSymlinks(`c:\a`) == EvalSymlinks(`C:\a`).
	// Make drive letter upper case.
	if len(p) >= 2 && p[1] == ':' && 'a' <= p[0] && p[0] <= 'z' {
		p = string(p[0]+'A'-'a') + p[1:]
	} else if len(p) >= 6 && p[5] == ':' && 'a' <= p[4] && p[4] <= 'z' {
		p = p[:3] + string(p[4]+'A'-'a') + p[5:]
	}
	return filepath.Clean(p), nil
}

const utf8RuneSelf = 0x80

func walkSymlinks(path string) (string, error) {
	const maxIter = 255
	originalPath := path
	// consume path by taking each frontmost path element,
	// expanding it if it's a symlink, and appending it to b
	var b bytes.Buffer
	for n := 0; path != ""; n++ {
		if n > maxIter {
			return "", errors.New("EvalSymlinks: too many links in " + originalPath)
		}

		// A path beginning with `\\?\` represents the root, so automatically
		// skip that part and begin processing the next segment.
		if strings.HasPrefix(path, longpath.Prefix) {
			b.WriteString(longpath.Prefix)
			path = path[4:]
			continue
		}

		// find next path component, p
		var i = -1
		for j, c := range path {
			if c < utf8RuneSelf && os.IsPathSeparator(uint8(c)) {
				i = j
				break
			}
		}
		var p string
		if i == -1 {
			p, path = path, ""
		} else {
			p, path = path[:i], path[i+1:]
		}

		if p == "" {
			if b.Len() == 0 {
				// must be absolute path
				b.WriteRune(filepath.Separator)
			}
			continue
		}

		// If this is the first segment after the long path prefix, accept the
		// current segment as a volume root or UNC share and move on to the next.
		if b.String() == longpath.Prefix {
			b.WriteString(p)
			b.WriteRune(filepath.Separator)
			continue
		}

		fi, err := os.Lstat(b.String() + p)
		if err != nil {
			return "", err
		}
		if fi.Mode()&os.ModeSymlink == 0 {
			b.WriteString(p)
			if path != "" || (b.Len() == 2 && len(p) == 2 && p[1] == ':') {
				b.WriteRune(filepath.Separator)
			}
			continue
		}

		// it's a symlink, put it at the front of path
		dest, err := os.Readlink(b.String() + p)
		if err != nil {
			return "", err
		}
		if filepath.IsAbs(dest) || os.IsPathSeparator(dest[0]) {
			b.Reset()
		}
		path = dest + string(filepath.Separator) + path
	}
	return filepath.Clean(b.String()), nil
}
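
As a sketch of how the Windows variant is driven (assumes a Windows host; the path is hypothetical, and longpath.AddPrefix from the companion longpath package is assumed to prepend `\\?\`):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/longpath"
	"github.com/docker/docker/pkg/symlink"
)

func main() {
	// AddPrefix is assumed to turn C:\... into \\?\C:\...; walkSymlinks
	// then treats \\?\ as the root and C: as the volume segment.
	p := longpath.AddPrefix(`C:\Users\Public`)
	resolved, err := symlink.EvalSymlinks(p)
	if err != nil {
		fmt.Println("eval failed:", err)
		return
	}
	fmt.Println(resolved) // drive letter normalized to upper case
}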

vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go
View file

@@ -1,128 +0,0 @@
// +build solaris,cgo

package system

import (
	"fmt"
	"unsafe"
)

// #cgo LDFLAGS: -lkstat
// #include <unistd.h>
// #include <stdlib.h>
// #include <stdio.h>
// #include <kstat.h>
// #include <sys/swap.h>
// #include <sys/param.h>
// struct swaptable *allocSwaptable(int num) {
//	struct swaptable *st;
//	struct swapent *swapent;
//	st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int));
//	swapent = st->swt_ent;
//	for (int i = 0; i < num; i++,swapent++) {
//		swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char));
//	}
//	st->swt_n = num;
//	return st;
//}
// void freeSwaptable (struct swaptable *st) {
//	struct swapent *swapent = st->swt_ent;
//	for (int i = 0; i < st->swt_n; i++,swapent++) {
//		free(swapent->ste_path);
//	}
//	free(st);
// }
// swapent_t getSwapEnt(swapent_t *ent, int i) {
//	return ent[i];
// }
// int64_t getPpKernel() {
//	int64_t pp_kernel = 0;
//	kstat_ctl_t *ksc;
//	kstat_t *ks;
//	kstat_named_t *knp;
//	kid_t kid;
//
//	if ((ksc = kstat_open()) == NULL) {
//		return -1;
//	}
//	if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) {
//		return -1;
//	}
//	if (((kid = kstat_read(ksc, ks, NULL)) == -1) ||
//	    ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) {
//		return -1;
//	}
//	switch (knp->data_type) {
//	case KSTAT_DATA_UINT64:
//		pp_kernel = knp->value.ui64;
//		break;
//	case KSTAT_DATA_UINT32:
//		pp_kernel = knp->value.ui32;
//		break;
//	}
//	pp_kernel *= sysconf(_SC_PAGESIZE);
//	return (pp_kernel > 0 ? pp_kernel : -1);
// }
import "C"

// Get the system memory info using sysconf, same as prtconf
func getTotalMem() int64 {
	pagesize := C.sysconf(C._SC_PAGESIZE)
	npages := C.sysconf(C._SC_PHYS_PAGES)
	return int64(pagesize * npages)
}

func getFreeMem() int64 {
	pagesize := C.sysconf(C._SC_PAGESIZE)
	npages := C.sysconf(C._SC_AVPHYS_PAGES)
	return int64(pagesize * npages)
}

// ReadMemInfo retrieves memory statistics of the host system and returns a
// MemInfo type.
func ReadMemInfo() (*MemInfo, error) {
	ppKernel := C.getPpKernel()
	MemTotal := getTotalMem()
	MemFree := getFreeMem()
	SwapTotal, SwapFree, err := getSysSwap()

	if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 ||
		SwapFree < 0 {
		return nil, fmt.Errorf("Error getting system memory info %v\n", err)
	}

	meminfo := &MemInfo{}
	// Total memory is total physical memory less the memory locked by the kernel
	meminfo.MemTotal = MemTotal - int64(ppKernel)
	meminfo.MemFree = MemFree
	meminfo.SwapTotal = SwapTotal
	meminfo.SwapFree = SwapFree

	return meminfo, nil
}

func getSysSwap() (int64, int64, error) {
	var tSwap int64
	var fSwap int64
	var diskblksPerPage int64
	num, err := C.swapctl(C.SC_GETNSWP, nil)
	if err != nil {
		return -1, -1, err
	}
	st := C.allocSwaptable(num)
	_, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st))
	if err != nil {
		C.freeSwaptable(st)
		return -1, -1, err
	}

	diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT)
	for i := 0; i < int(num); i++ {
		swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i))
		tSwap += int64(swapent.ste_pages) * diskblksPerPage
		fSwap += int64(swapent.ste_free) * diskblksPerPage
	}
	C.freeSwaptable(st)
	return tSwap, fSwap, nil
}
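
For context, a minimal sketch of how callers consume this API; ReadMemInfo has per-platform implementations, and the field names below come from the MemInfo struct populated above:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/system"
)

func main() {
	mem, err := system.ReadMemInfo()
	if err != nil {
		fmt.Println("meminfo unavailable:", err)
		return
	}
	// All values are byte counts (pages * page size, swap blocks * block size).
	fmt.Printf("mem total=%d free=%d swap total=%d free=%d\n",
		mem.MemTotal, mem.MemFree, mem.SwapTotal, mem.SwapFree)
}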

vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go
View file

@@ -1,4 +1,4 @@
-// +build !linux,!windows,!solaris
+// +build !linux,!windows

 package system

vendor/github.com/docker/docker/pkg/system/path_unix.go
View file

@@ -6,9 +6,3 @@ package system
 // executables. Each directory is separated from the next by a colon
 // ':' character.
 const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
-
-// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
-// is the system drive. This is a no-op on Linux.
-func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
-	return path, nil
-}

vendor/github.com/docker/docker/pkg/system/path_windows.go
View file

@@ -2,36 +2,6 @@
 package system

-import (
-	"fmt"
-	"path/filepath"
-	"strings"
-)
-
 // DefaultPathEnv is deliberately empty on Windows as the default path will be set by
 // the container. Docker has no context of what the default path should be.
 const DefaultPathEnv = ""
-
-// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path.
-// This is used, for example, when validating a user provided path in docker cp.
-// If a drive letter is supplied, it must be the system drive. The drive letter
-// is always removed. Also, it translates it to OS semantics (IOW / to \). We
-// need the path in this syntax so that it can ultimately be concatenated with
-// a Windows long-path which doesn't support drive-letters. Examples:
-// C:    --> Fail
-// C:\   --> \
-// a     --> a
-// /a    --> \a
-// d:\   --> Fail
-func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
-	if len(path) == 2 && string(path[1]) == ":" {
-		return "", fmt.Errorf("No relative path specified in %q", path)
-	}
-	if !filepath.IsAbs(path) || len(path) < 2 {
-		return filepath.FromSlash(path), nil
-	}
-	if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
-		return "", fmt.Errorf("The specified path is not on the system drive (C:)")
-	}
-	return filepath.FromSlash(path[2:]), nil
-}
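
The removed helper's contract is easiest to see as a table-driven check; a minimal sketch (hypothetical test, meaningful only on a Windows host) mirroring the examples in the comment above:

package system

import "testing"

// Hypothetical test pinning the documented behavior of the removed helper.
func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) {
	ok := map[string]string{
		`C:\`: `\`,  // system drive root: drive letter stripped
		`a`:   `a`,  // short relative path: returned as-is
		`/a`:  `\a`, // forward slash translated to backslash
	}
	for in, want := range ok {
		got, err := CheckSystemDriveAndRemoveDriveLetter(in)
		if err != nil || got != want {
			t.Fatalf("%q: got %q, %v; want %q", in, got, err, want)
		}
	}
	// A bare drive ("C:") and a non-system drive ("d:\") must both fail.
	for _, in := range []string{`C:`, `d:\`} {
		if _, err := CheckSystemDriveAndRemoveDriveLetter(in); err == nil {
			t.Fatalf("%q: expected an error", in)
		}
	}
}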

View file

@@ -15,20 +15,3 @@ func fromStatT(s *syscall.Stat_t) (*StatT, error) {
 		rdev: uint64(s.Rdev),
 		mtim: s.Mtim}, nil
 }
-
-// FromStatT loads a system.StatT from a syscall.Stat_t.
-func FromStatT(s *syscall.Stat_t) (*StatT, error) {
-	return fromStatT(s)
-}
-
-// Stat takes a path to a file and returns
-// a system.StatT type pertaining to that file.
-//
-// Throws an error if the file does not exist
-func Stat(path string) (*StatT, error) {
-	s := &syscall.Stat_t{}
-	if err := syscall.Stat(path, s); err != nil {
-		return nil, err
-	}
-	return fromStatT(s)
-}
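
For context, a minimal sketch of the call-site shape these removed helpers supported (Unix-only; the path is an arbitrary example, and StatT's Mode/Size accessors are assumed from the surrounding package):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/system"
)

func main() {
	// Stat wraps syscall.Stat and converts the result into the
	// platform-independent StatT built by fromStatT above.
	st, err := system.Stat("/etc/hostname") // arbitrary example path
	if err != nil {
		fmt.Println("stat failed:", err)
		return
	}
	fmt.Printf("mode=%o size=%d\n", st.Mode(), st.Size())
}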

vendor/github.com/docker/docker/pkg/system/syscall_windows.go
View file

@@ -1,15 +1,9 @@
 package system

 import (
+	"fmt"
 	"syscall"
-	"unsafe"
-
-	"github.com/Sirupsen/logrus"
 )

-var (
-	ntuserApiset      = syscall.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0")
-	procGetVersionExW = modkernel32.NewProc("GetVersionExW")
-)

 // OSVersion is a wrapper for Windows version information
@@ -21,47 +15,19 @@ type OSVersion struct {
 	Build        uint16
 }

-// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx
-type osVersionInfoEx struct {
-	OSVersionInfoSize uint32
-	MajorVersion      uint32
-	MinorVersion      uint32
-	BuildNumber       uint32
-	PlatformID        uint32
-	CSDVersion        [128]uint16
-	ServicePackMajor  uint16
-	ServicePackMinor  uint16
-	SuiteMask         uint16
-	ProductType       byte
-	Reserve           byte
-}

 // GetOSVersion gets the operating system version on Windows. Note that
 // docker.exe must be manifested to get the correct version information.
-func GetOSVersion() OSVersion {
+func GetOSVersion() (OSVersion, error) {
 	var err error
 	osv := OSVersion{}
 	osv.Version, err = syscall.GetVersion()
 	if err != nil {
-		// GetVersion never fails.
-		panic(err)
+		return osv, fmt.Errorf("Failed to call GetVersion()")
 	}
 	osv.MajorVersion = uint8(osv.Version & 0xFF)
 	osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF)
 	osv.Build = uint16(osv.Version >> 16)
-	return osv
-}
-
-// IsWindowsClient returns true if the SKU is client
-func IsWindowsClient() bool {
-	osviex := &osVersionInfoEx{OSVersionInfoSize: 284}
-	r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex)))
-	if r1 == 0 {
-		logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err)
-		return false
-	}
-	const verNTWorkstation = 0x00000001
-	return osviex.ProductType == verNTWorkstation
-}
+	return osv, nil
 }

 // Unmount is a platform-specific helper function to call
@@ -92,12 +58,3 @@ func CommandLineToArgv(commandLine string) ([]string, error) {
 	return newArgs, nil
 }
-
-// HasWin32KSupport determines whether containers that depend on win32k can
-// run on this machine. Win32k is the driver used to implement windowing.
-func HasWin32KSupport() bool {
-	// For now, check for ntuser API support on the host. In the future, a host
-	// may support win32k in containers even if the host does not support ntuser
-	// APIs.
-	return ntuserApiset.Load() == nil
-}
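
The signature change above shifts error handling to the caller; a minimal sketch of the new call site (Windows-only; the printed format is illustrative):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/system"
)

func main() {
	// New shape: GetOSVersion returns an error instead of panicking.
	osv, err := system.GetOSVersion()
	if err != nil {
		fmt.Println("version lookup failed:", err)
		return
	}
	fmt.Printf("Windows %d.%d (build %d)\n", osv.MajorVersion, osv.MinorVersion, osv.Build)
}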

Some files were not shown because too many files have changed in this diff.