fix chown and seccomp
Signed-off-by: Jess Frazelle <acidburn@microsoft.com>
parent 60f032f6f5, commit 2569457739
8197 changed files with 30742 additions and 1596554 deletions
vendor/github.com/containerd/containerd/.appveyor.yml (generated, vendored, normal file, 40 lines)
@@ -0,0 +1,40 @@
version: "{build}"

image: Visual Studio 2017

clone_folder: c:\gopath\src\github.com\containerd\containerd

branches:
  only:
    - master

environment:
  GOPATH: C:\gopath
  CGO_ENABLED: 1
  GO_VERSION: 1.9

before_build:
  - choco install -y mingw
  # Install Go
  - rd C:\Go /s /q
  - appveyor DownloadFile https://storage.googleapis.com/golang/go%GO_VERSION%.windows-amd64.zip
  - 7z x go%GO_VERSION%.windows-amd64.zip -oC:\ >nul
  - go version
  - choco install codecov

build_script:
  - bash.exe -elc "export PATH=/c/tools/mingw64/bin:/c/gopath/bin:$PATH;
    script/setup/install-dev-tools;
    mingw32-make.exe check"
  - bash.exe -elc "export PATH=/c/tools/mingw64/bin:$PATH ; mingw32-make.exe build binaries"

test_script:
  # TODO: need an equivalent of TRAVIS_COMMIT_RANGE
  # - GIT_CHECK_EXCLUDE="./vendor" TRAVIS_COMMIT_RANGE="${TRAVIS_COMMIT_RANGE/.../..}" C:\MinGW\bin\mingw32-make.exe dco
  - bash.exe -lc "export PATH=/c/tools/mingw64/bin:/c/gopath/src/github.com/containerd/containerd/bin:$PATH ; mingw32-make.exe coverage root-coverage"
  - bash.exe -elc "export PATH=/c/tools/mingw64/bin:/c/gopath/src/github.com/containerd/containerd/bin:$PATH ; mingw32-make.exe integration"
  # Run the integration suite a second time. See discussion in github.com/containerd/containerd/pull/1759
  - bash.exe -elc "export PATH=/c/tools/mingw64/bin:/c/gopath/src/github.com/containerd/containerd/bin:$PATH; TESTFLAGS_PARALLEL=1 mingw32-make.exe integration"

on_success:
  codecov --flag windows -f coverage.txt
vendor/github.com/containerd/containerd/.gitignore (generated, vendored, normal file, 4 lines)
@@ -0,0 +1,4 @@
/bin/
coverage.txt
profile.out
containerd.test
vendor/github.com/containerd/containerd/.gometalinter.json (generated, vendored, normal file, 24 lines)
@@ -0,0 +1,24 @@
{
  "Vendor": true,
  "Deadline": "2m",
  "Sort": ["linter", "severity", "path", "line"],
  "Exclude": [
    ".*\\.pb\\.go",
    "fetch\\.go:.*::error: unrecognized printf verb 'r'"
  ],
  "EnableGC": true,
  "WarnUnmatchedDirective": true,

  "Enable": [
    "structcheck",
    "unused",
    "varcheck",
    "staticcheck",

    "gofmt",
    "goimports",
    "golint",
    "ineffassign",
    "vet"
  ]
}
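As an aside, this is the settings file consumed by the gometalinter tool; a minimal invocation sketch, assuming a gometalinter version on your `PATH` that supports the `--config` flag:

```sh
# Run the configured linters from the repository root.
gometalinter --config .gometalinter.json ./...
```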
vendor/github.com/containerd/containerd/.travis.yml (generated, vendored, normal file, 84 lines)
@@ -0,0 +1,84 @@
dist: trusty
sudo: required
# setup travis so that we can run containers for integration tests
services:
  - docker

language: go

go:
  - 1.9.x

go_import_path: github.com/containerd/containerd

addons:
  apt:
    packages:
      - btrfs-tools
      - libseccomp-dev
      - libapparmor-dev
      - libnl-3-dev
      - libnet-dev
      - protobuf-c-compiler
      # - protobuf-compiler
      - python-minimal
      - libcap-dev
      - libaio-dev
      - libprotobuf-c0-dev
      - libprotobuf-dev

env:
  - TRAVIS_GOOS=linux TRAVIS_CGO_ENABLED=1
  - TRAVIS_GOOS=darwin TRAVIS_CGO_ENABLED=0

before_install:
  - uname -r

install:
  - wget https://github.com/google/protobuf/releases/download/v3.5.0/protoc-3.5.0-linux-x86_64.zip -O /tmp/protoc-3.5.0-linux-x86_64.zip
  - sudo unzip -o -d /usr/local /tmp/protoc-3.5.0-linux-x86_64.zip
  - sudo chmod +x /usr/local/bin/protoc
  - sudo chmod og+rx /usr/local/include/google /usr/local/include/google/protobuf /usr/local/include/google/protobuf/compiler
  - sudo chmod -R og+r /usr/local/include/google/protobuf/
  - protoc --version
  - go get -u github.com/vbatts/git-validation
  - sudo wget https://github.com/crosbymichael/runc/releases/download/ctd-9/runc -O /bin/runc; sudo chmod +x /bin/runc
  - wget https://github.com/xemul/criu/archive/v3.0.tar.gz -O /tmp/criu.tar.gz
  - tar -C /tmp/ -zxf /tmp/criu.tar.gz
  - cd /tmp/criu-3.0 && sudo make install-criu
  - cd $TRAVIS_BUILD_DIR

script:
  - export GOOS=$TRAVIS_GOOS
  - export CGO_ENABLED=$TRAVIS_CGO_ENABLED
  - GIT_CHECK_EXCLUDE="./vendor" TRAVIS_COMMIT_RANGE="${TRAVIS_COMMIT_RANGE/.../..}" make dco
  - GOOS=linux script/setup/install-dev-tools
  - script/validate/vendor
  - go build -i .
  - make check
  - if [ "$GOOS" = "linux" ]; then make check-protos check-api-descriptors; fi
  - make build
  - make binaries
  - if [ "$GOOS" = "linux" ]; then sudo make install ; fi
  - if [ "$GOOS" = "linux" ]; then make coverage ; fi
  - if [ "$GOOS" = "linux" ]; then sudo PATH=$PATH GOPATH=$GOPATH make root-coverage ; fi
  - if [ "$GOOS" = "linux" ]; then sudo PATH=$PATH GOPATH=$GOPATH make integration ; fi
  # Run the integration suite a second time. See discussion in github.com/containerd/containerd/pull/1759
  - if [ "$GOOS" = "linux" ]; then sudo PATH=$PATH GOPATH=$GOPATH TESTFLAGS_PARALLEL=1 make integration ; fi

after_success:
  - bash <(curl -s https://codecov.io/bash) -F linux

before_deploy:
  - make release

deploy:
  provider: releases
  api_key:
    secure: HO+WSIVVUMMsbU74x+YyFsTP3ahqnR4xjwKAziedJ5lZXKJszQBhiYTFmcTeVBoouNjTISd07GQzpoLChuGC20U3+1NbT+CkK8xWR/x1ao2D3JY3Ds6AD9ubWRNWRLptt/xOn5Vq3F8xZyUYchwvDMl4zKCuTKxQGVdHKsINb2DehKcP5cVL6MMvqzEdfj2g99vqXAqs8uuo6dOmvxmHV43bfzDaAJSabjZZs6TKlWTqCQMet8uxyx2Dmjl2lxLwdqv12oJdrszacasn41NYuEyHI2bXyef1mhWGYN4n9bU/Y5winctZ8DOSOZvYg/2ziAaUN0+CTn1IESwVesrPz23P2Sy7wdLxu8dSIZ2yUHl7OsA5T5a5rDchAGguRVNBWvoGtuepEhdRacxTQUo1cMFZsEXjgRKKjdfc1emYQPVdN8mBv8GJwndty473ZXdvFt5R0kNVFtvWuYCa6UYJD2cKrsPSAfbZCDC/LiR3FOoTaUPMZUVkR2ACEO7Dn4+KlmBajqT40Osk/A7k1XA/TzVhMIpLtE0Vk2DfPmGsjCv8bC+MFd+R2Sc8SFdE92oEWRdoPQY5SxMYQtGxA+cbKVlT1kSw6y80yEbx5JZsBnT6+NTHwmDO3kVU9ztLdawOozTElKNAK8HoAyFmzIZ3wL64oThuDrv/TUuY8Iyn814=
  file_glob: true
  file: releases/*.tar.gz
  skip_cleanup: true
  on:
    repo: containerd/containerd
    tags: true
vendor/github.com/containerd/containerd/BUILDING.md (generated, vendored, normal file, 233 lines)
@@ -0,0 +1,233 @@
# Build containerd from source

This guide is useful if you intend to contribute to containerd. Thanks for your
effort. Every contribution is very much appreciated.

This doc includes:
* [Build requirements](#build-requirements)
* [Build the development environment](#build-the-development-environment)
* [Build containerd](#build-containerd)
* [Via docker container](#via-docker-container)
* [Testing](#testing-containerd)

## Build requirements

To build the `containerd` daemon, and the `ctr` simple test client, the following build system dependencies are required:

* Go 1.9.x or above
* Protoc 3.x compiler and headers (download at the [Google protobuf releases page](https://github.com/google/protobuf/releases))
* Btrfs headers and libraries for your distribution. Note that building the btrfs driver can be disabled via the build tag `no_btrfs`, removing this dependency.

## Build the development environment

First you need to set up your Go development environment. You can follow this
guideline [How to write go code](https://golang.org/doc/code.html) and at the
end you need to have `GOPATH` and `GOROOT` set in your environment.

At this point you can use `go` to checkout `containerd` in your `GOPATH`:

```sh
go get github.com/containerd/containerd
```

For proper results, install the `protoc` release into `/usr/local` on your build system. For example, the following commands will download and install the 3.5.0 release for a 64-bit Linux host:

```sh
$ wget -c https://github.com/google/protobuf/releases/download/v3.5.0/protoc-3.5.0-linux-x86_64.zip
$ sudo unzip protoc-3.5.0-linux-x86_64.zip -d /usr/local
```

`containerd` uses [Btrfs](https://en.wikipedia.org/wiki/Btrfs), which means that you
need to satisfy these dependencies on your system:

* CentOS/Fedora: `yum install btrfs-progs-devel`
* Debian/Ubuntu: `apt-get install btrfs-tools`

At this point you are ready to build `containerd` yourself!

## Build containerd

`containerd` uses `make` to create a repeatable build flow. This means that you
can simply run:

```sh
make
```

This builds all the project binaries into the `./bin/` directory.

You can move them to your global path, `/usr/local/bin`, with:

```sh
sudo make install
```

When making any changes to the gRPC API, you can use the installed `protoc`
compiler to regenerate the API generated code packages with:

```sh
make generate
```

> *Note*: A build tag is currently available to disable building the btrfs snapshot driver.
> Adding `BUILDTAGS=no_btrfs` to your environment before calling the **binaries**
> Makefile target will disable the btrfs driver within the containerd Go build
> (see the sketch below).
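For example, a minimal sketch of a btrfs-free build using the tag named in the note above:

```sh
# Build the project binaries without the btrfs snapshot driver.
make BUILDTAGS=no_btrfs binaries
```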
Vendoring of external imports uses the [`vndr` tool](https://github.com/LK4D4/vndr), which uses a simple config file, `vendor.conf`, to provide the URL and version or hash details for each vendored import. After modifying `vendor.conf`, run the `vndr` tool to update the `vendor/` directory contents. The `vendor.conf` update and the resulting changeset in `vendor/` should be combined into a single commit for any PR that relies on vendored updates, as sketched below.
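A minimal sketch of that flow, assuming `vndr` is already installed on your `PATH` and run from the repository root:

```sh
# 1. Edit vendor.conf by hand to pin or bump the dependency's version or hash.
# 2. Re-vendor; with no arguments, vndr rewrites vendor/ from vendor.conf.
vndr

# 3. Commit the config change and the resulting vendor/ changeset together.
git add vendor.conf vendor/
git commit -s -m "vendor: update dependencies"
```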
Please refer to [RUNC.md](/RUNC.md) for the currently supported version of `runc` that is used by containerd.

### Static binaries

You can build static binaries by providing a few variables to `make`:

```sh
make EXTRA_FLAGS="-buildmode pie" \
	EXTRA_LDFLAGS='-extldflags "-fno-PIC -static"' \
	BUILDTAGS="static_build"
```

> *Note*:
> - static build is discouraged
> - static containerd binary does not support loading plugins

# Via Docker container

## Build containerd

You can build `containerd` via a Linux-based Docker container.
You can build an image from this `Dockerfile`:

```
FROM golang

RUN apt-get update && \
    apt-get install -y btrfs-tools
```

Let's suppose that you built an image called `containerd/build`. From the
containerd source root directory you can run the following command:

```sh
docker run -it \
    -v ${PWD}:/go/src/github.com/containerd/containerd \
    -e GOPATH=/go \
    -w /go/src/github.com/containerd/containerd containerd/build sh
```

This mounts the `containerd` repository inside the container.

You are now ready to [build](#build-containerd):

```sh
make && make install
```

## Build containerd and runc

To have a complete core container runtime, you will need both `containerd` and `runc`. It is possible to build both of these via Docker container.

You can use `go` to checkout `runc` in your `GOPATH`:

```sh
go get github.com/opencontainers/runc
```

We can build an image from this `Dockerfile`:

```
FROM golang

RUN apt-get update && \
    apt-get install -y btrfs-tools libapparmor-dev libseccomp-dev
```

In our Docker container we will use a specific `runc` build which includes [seccomp](https://en.wikipedia.org/wiki/seccomp) and [apparmor](https://en.wikipedia.org/wiki/AppArmor) support, which is why our Dockerfile includes the `libapparmor-dev` and `libseccomp-dev` dependencies. Please refer to [RUNC.md](/RUNC.md) for the currently supported version of `runc` that is used by containerd.

Let's suppose you built an image called `containerd/build` from the above Dockerfile. You can run the following command:

```sh
docker run -it --privileged \
    -v /var/lib/containerd \
    -v ${GOPATH}/src/github.com/opencontainers/runc:/go/src/github.com/opencontainers/runc \
    -v ${GOPATH}/src/github.com/containerd/containerd:/go/src/github.com/containerd/containerd \
    -e GOPATH=/go \
    -w /go/src/github.com/containerd/containerd containerd/build sh
```

This mounts both the `runc` and `containerd` repositories in our Docker container.

From within our Docker container let's build `containerd`:

```sh
cd /go/src/github.com/containerd/containerd
make && make install
```

These binaries can be found in the `./bin` directory on your host.
`make install` will move the binaries into your `$PATH`.

Next, let's build `runc`:

```sh
cd /go/src/github.com/opencontainers/runc
make BUILDTAGS='seccomp apparmor' && make install
```

When working with `ctr`, the simple test client we just built, don't forget to start the daemon!

```sh
containerd --config config.toml
```

# Testing containerd

During the automated CI the unit tests and integration tests are run as part of the PR validation. As a developer you can run these tests locally by using any of the following `Makefile` targets:
- `make test`: run all non-integration tests that do not require `root` privileges
- `make root-test`: run all non-integration tests which require `root`
- `make integration`: run all tests, including integration tests and those which require `root`
- `make integration-parallel`: run all tests (integration and root-required included) in parallel mode

To execute a specific test or set of tests you can use the `go test` capabilities
without using the `Makefile` targets. The following examples show how to specify a test
name and also how to use the flag directly against `go test` to run root-requiring tests.

```sh
# run the test <TEST_NAME>:
go test -v -run "<TEST_NAME>" .
# enable the root-requiring tests:
go test -v -run . -test.root
```

Example output from directly running `go test` to execute the `TestContainerList` test:

```sh
sudo go test -v -run "TestContainerList" . -test.root
INFO[0000] running tests against containerd revision=f2ae8a020a985a8d9862c9eb5ab66902c2888361 version=v1.0.0-beta.2-49-gf2ae8a0
=== RUN   TestContainerList
--- PASS: TestContainerList (0.00s)
PASS
ok      github.com/containerd/containerd        4.778s
```

## Additional tools

### containerd-stress

In addition to `go test`-based testing executed via the `Makefile` targets, the `containerd-stress` tool is available and built with the `all` or `binaries` targets and installed during `make install`.

With this tool you can stress a running containerd daemon for a specified period of time, selecting a concurrency level to generate stress against the daemon. The following command is an example of having five workers running for two hours against a default containerd gRPC socket address:

```sh
containerd-stress -c 5 -t 120
```

For more information on this tool's options please run `containerd-stress --help`.

### bucketbench

[Bucketbench](https://github.com/estesp/bucketbench) is an external tool which can be used to drive load against a container runtime, specifying a particular set of lifecycle operations to run with a specified amount of concurrency. Bucketbench is more focused on generating performance details than simply inducing load against containerd.

Bucketbench differs from the `containerd-stress` tool in a few ways:
- Bucketbench has support for testing the Docker engine, the `runc` binary, and containerd 0.2.x (via `ctr`) and 1.0 (via the client library) branches.
- Bucketbench is driven via a configuration file that allows specifying a list of lifecycle operations to execute. This can be used to generate detailed statistics per-command (e.g. start, stop, pause, delete).
- Bucketbench generates detailed reports and timing data at the end of the configured test run.

More details on how to install and run `bucketbench` are available at the [GitHub project page](https://github.com/estesp/bucketbench).
vendor/github.com/containerd/containerd/CONTRIBUTING.md (generated, vendored, normal file, 118 lines)
@@ -0,0 +1,118 @@
# Contributing

Contributions should be made via pull requests. Pull requests will be reviewed
by one or more maintainers and merged when acceptable.

This project is in an early state, making the impact of contributions much
greater than at other stages. In this respect, it is important to consider any
changes or additions for their future impact more so than their current impact.

## Successful Changes

We ask that before contributing, please make the effort to coordinate with the
maintainers of the project before submitting large or high impact PRs. This
will prevent you from doing extra work that may or may not be merged.

PRs that are just submitted without any prior communication will likely be
summarily closed.

While pull requests are the methodology for submitting changes to code, changes
are much more likely to be accepted if they are accompanied by additional
engineering work. While we don't define this explicitly, most of these goals
are accomplished through communication of the design goals and subsequent
solutions. Oftentimes, it helps to first state the problem before presenting
solutions.

Typically, the best method of accomplishing this is to submit an issue
stating the problem. This issue can include a problem statement and a
checklist with requirements. If solutions are proposed, alternatives should be
listed and eliminated. Even if the criteria for eliminating a solution are
frivolous, say so.

Larger changes typically work best with design documents, similar to those found
in `design/`. These are focused on providing context to the design at the time
the feature was conceived and can inform future documentation contributions.

Make sure that new tests are added for bugs in order to catch regressions, and that
tests are added with new features to exercise the new functionality that is added.

## Commit Messages

There are times for one line commit messages and this is not one of them.
Commit messages should follow best practices, including explaining the context
of the problem and how it was solved, including any caveats or follow-up changes
required. They should tell the story of the change and give readers an
understanding of what led to it.

If you're lost about what this even means, please see [How to Write a Git
Commit Message](http://chris.beams.io/posts/git-commit/) for a start.

In practice, the best approach to maintaining a nice commit message is to
leverage `git add -p` and `git commit --amend` to formulate a solid
changeset. This allows one to piece together a change, as information becomes
available.
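A minimal sketch of that flow:

```sh
# Stage related hunks interactively, keeping unrelated edits out of the commit.
git add -p

# Commit, then fold later fixes into the same commit as they surface.
git commit -s
git add -p
git commit --amend
```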
If you squash a series of commits, don't just submit that. Re-write the commit
message, as if the series of commits was a single stroke of brilliance.

That said, there is no requirement to have a single commit for a PR, as long as
each commit tells the story. For example, if there is a feature that requires a
package, it might make sense to have the package in a separate commit and then
a subsequent commit that uses it.

Remember, you're telling part of the story with the commit message. Don't make
your chapter weird.

## Sign your work

The sign-off is a simple line at the end of the explanation for the patch. Your
signature certifies that you wrote the patch or otherwise have the right to pass
it on as an open-source patch. The rules are pretty simple: if you can certify
the below (from [developercertificate.org](http://developercertificate.org/)):

```
Developer Certificate of Origin
Version 1.1

Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA

Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.

Developer's Certificate of Origin 1.1

By making a contribution to this project, I certify that:

(a) The contribution was created in whole or in part by me and I
    have the right to submit it under the open source license
    indicated in the file; or

(b) The contribution is based upon previous work that, to the best
    of my knowledge, is covered under an appropriate open source
    license and I have the right under that license to submit that
    work with modifications, whether created in whole or in part
    by me, under the same open source license (unless I am
    permitted to submit under a different license), as indicated
    in the file; or

(c) The contribution was provided directly to me by some other
    person who certified (a), (b) or (c) and I have not modified
    it.

(d) I understand and agree that this project and the contribution
    are public and that a record of the contribution (including all
    personal information I submit with it, including my sign-off) is
    maintained indefinitely and may be redistributed consistent with
    this project or the open source license(s) involved.
```

Then you just add a line to every git commit message:

    Signed-off-by: Joe Smith <joe.smith@email.com>

Use your real name (sorry, no pseudonyms or anonymous contributions).

If you set your `user.name` and `user.email` git configs, you can sign your
commit automatically with `git commit -s`.
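For example, a one-time setup using the identity from the sign-off line above:

```sh
# Tell git who you are so -s emits the right Signed-off-by line.
git config user.name "Joe Smith"
git config user.email joe.smith@email.com

# Sign off automatically on each commit.
git commit -s
```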
vendor/github.com/containerd/containerd/LICENSE.code (generated, vendored, normal file, 191 lines)
@@ -0,0 +1,191 @@
                                 Apache License
                           Version 2.0, January 2004
                        https://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   Copyright 2013-2016 Docker, Inc.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       https://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
vendor/github.com/containerd/containerd/LICENSE.docs (generated, vendored, normal file, 395 lines)
@@ -0,0 +1,395 @@
Attribution 4.0 International

=======================================================================

Creative Commons Corporation ("Creative Commons") is not a law firm and
does not provide legal services or legal advice. Distribution of
Creative Commons public licenses does not create a lawyer-client or
other relationship. Creative Commons makes its licenses and related
information available on an "as-is" basis. Creative Commons gives no
warranties regarding its licenses, any material licensed under their
terms and conditions, or any related information. Creative Commons
disclaims all liability for damages resulting from their use to the
fullest extent possible.

Using Creative Commons Public Licenses

Creative Commons public licenses provide a standard set of terms and
conditions that creators and other rights holders may use to share
original works of authorship and other material subject to copyright
and certain other rights specified in the public license below. The
following considerations are for informational purposes only, are not
exhaustive, and do not form part of our licenses.

     Considerations for licensors: Our public licenses are
     intended for use by those authorized to give the public
     permission to use material in ways otherwise restricted by
     copyright and certain other rights. Our licenses are
     irrevocable. Licensors should read and understand the terms
     and conditions of the license they choose before applying it.
     Licensors should also secure all rights necessary before
     applying our licenses so that the public can reuse the
     material as expected. Licensors should clearly mark any
     material not subject to the license. This includes other CC-
     licensed material, or material used under an exception or
     limitation to copyright. More considerations for licensors:
     wiki.creativecommons.org/Considerations_for_licensors

     Considerations for the public: By using one of our public
     licenses, a licensor grants the public permission to use the
     licensed material under specified terms and conditions. If
     the licensor's permission is not necessary for any reason--for
     example, because of any applicable exception or limitation to
     copyright--then that use is not regulated by the license. Our
     licenses grant only permissions under copyright and certain
     other rights that a licensor has authority to grant. Use of
     the licensed material may still be restricted for other
     reasons, including because others have copyright or other
     rights in the material. A licensor may make special requests,
     such as asking that all changes be marked or described.
     Although not required by our licenses, you are encouraged to
     respect those requests where reasonable. More considerations
     for the public:
     wiki.creativecommons.org/Considerations_for_licensees

=======================================================================

Creative Commons Attribution 4.0 International Public License

By exercising the Licensed Rights (defined below), You accept and agree
to be bound by the terms and conditions of this Creative Commons
Attribution 4.0 International Public License ("Public License"). To the
extent this Public License may be interpreted as a contract, You are
granted the Licensed Rights in consideration of Your acceptance of
these terms and conditions, and the Licensor grants You such rights in
consideration of benefits the Licensor receives from making the
Licensed Material available under these terms and conditions.


Section 1 -- Definitions.

  a. Adapted Material means material subject to Copyright and Similar
     Rights that is derived from or based upon the Licensed Material
     and in which the Licensed Material is translated, altered,
     arranged, transformed, or otherwise modified in a manner requiring
     permission under the Copyright and Similar Rights held by the
     Licensor. For purposes of this Public License, where the Licensed
     Material is a musical work, performance, or sound recording,
     Adapted Material is always produced where the Licensed Material is
     synched in timed relation with a moving image.

  b. Adapter's License means the license You apply to Your Copyright
     and Similar Rights in Your contributions to Adapted Material in
     accordance with the terms and conditions of this Public License.

  c. Copyright and Similar Rights means copyright and/or similar rights
     closely related to copyright including, without limitation,
     performance, broadcast, sound recording, and Sui Generis Database
     Rights, without regard to how the rights are labeled or
     categorized. For purposes of this Public License, the rights
     specified in Section 2(b)(1)-(2) are not Copyright and Similar
     Rights.

  d. Effective Technological Measures means those measures that, in the
     absence of proper authority, may not be circumvented under laws
     fulfilling obligations under Article 11 of the WIPO Copyright
     Treaty adopted on December 20, 1996, and/or similar international
     agreements.

  e. Exceptions and Limitations means fair use, fair dealing, and/or
     any other exception or limitation to Copyright and Similar Rights
     that applies to Your use of the Licensed Material.

  f. Licensed Material means the artistic or literary work, database,
     or other material to which the Licensor applied this Public
     License.

  g. Licensed Rights means the rights granted to You subject to the
     terms and conditions of this Public License, which are limited to
     all Copyright and Similar Rights that apply to Your use of the
     Licensed Material and that the Licensor has authority to license.

  h. Licensor means the individual(s) or entity(ies) granting rights
     under this Public License.

  i. Share means to provide material to the public by any means or
     process that requires permission under the Licensed Rights, such
     as reproduction, public display, public performance, distribution,
     dissemination, communication, or importation, and to make material
     available to the public including in ways that members of the
     public may access the material from a place and at a time
     individually chosen by them.

  j. Sui Generis Database Rights means rights other than copyright
     resulting from Directive 96/9/EC of the European Parliament and of
     the Council of 11 March 1996 on the legal protection of databases,
     as amended and/or succeeded, as well as other essentially
     equivalent rights anywhere in the world.

  k. You means the individual or entity exercising the Licensed Rights
     under this Public License. Your has a corresponding meaning.


Section 2 -- Scope.

  a. License grant.

       1. Subject to the terms and conditions of this Public License,
          the Licensor hereby grants You a worldwide, royalty-free,
          non-sublicensable, non-exclusive, irrevocable license to
          exercise the Licensed Rights in the Licensed Material to:

            a. reproduce and Share the Licensed Material, in whole or
               in part; and

            b. produce, reproduce, and Share Adapted Material.

       2. Exceptions and Limitations. For the avoidance of doubt, where
          Exceptions and Limitations apply to Your use, this Public
          License does not apply, and You do not need to comply with
          its terms and conditions.

       3. Term. The term of this Public License is specified in Section
          6(a).

       4. Media and formats; technical modifications allowed. The
          Licensor authorizes You to exercise the Licensed Rights in
          all media and formats whether now known or hereafter created,
          and to make technical modifications necessary to do so. The
          Licensor waives and/or agrees not to assert any right or
          authority to forbid You from making technical modifications
          necessary to exercise the Licensed Rights, including
          technical modifications necessary to circumvent Effective
          Technological Measures. For purposes of this Public License,
          simply making modifications authorized by this Section 2(a)
          (4) never produces Adapted Material.

       5. Downstream recipients.

            a. Offer from the Licensor -- Licensed Material. Every
               recipient of the Licensed Material automatically
               receives an offer from the Licensor to exercise the
               Licensed Rights under the terms and conditions of this
               Public License.

            b. No downstream restrictions. You may not offer or impose
               any additional or different terms or conditions on, or
               apply any Effective Technological Measures to, the
               Licensed Material if doing so restricts exercise of the
               Licensed Rights by any recipient of the Licensed
               Material.

       6. No endorsement. Nothing in this Public License constitutes or
          may be construed as permission to assert or imply that You
          are, or that Your use of the Licensed Material is, connected
          with, or sponsored, endorsed, or granted official status by,
          the Licensor or others designated to receive attribution as
          provided in Section 3(a)(1)(A)(i).

  b. Other rights.

       1. Moral rights, such as the right of integrity, are not
          licensed under this Public License, nor are publicity,
          privacy, and/or other similar personality rights; however, to
          the extent possible, the Licensor waives and/or agrees not to
          assert any such rights held by the Licensor to the limited
          extent necessary to allow You to exercise the Licensed
          Rights, but not otherwise.

       2. Patent and trademark rights are not licensed under this
          Public License.

       3. To the extent possible, the Licensor waives any right to
          collect royalties from You for the exercise of the Licensed
          Rights, whether directly or through a collecting society
          under any voluntary or waivable statutory or compulsory
          licensing scheme. In all other cases the Licensor expressly
          reserves any right to collect such royalties.


Section 3 -- License Conditions.

Your exercise of the Licensed Rights is expressly made subject to the
following conditions.

  a. Attribution.

       1. If You Share the Licensed Material (including in modified
          form), You must:

            a. retain the following if it is supplied by the Licensor
               with the Licensed Material:

                 i. identification of the creator(s) of the Licensed
                    Material and any others designated to receive
                    attribution, in any reasonable manner requested by
                    the Licensor (including by pseudonym if
                    designated);

                ii. a copyright notice;

               iii. a notice that refers to this Public License;

                iv. a notice that refers to the disclaimer of
                    warranties;

                 v. a URI or hyperlink to the Licensed Material to the
                    extent reasonably practicable;

            b. indicate if You modified the Licensed Material and
               retain an indication of any previous modifications; and

            c. indicate the Licensed Material is licensed under this
               Public License, and include the text of, or the URI or
               hyperlink to, this Public License.

       2. You may satisfy the conditions in Section 3(a)(1) in any
          reasonable manner based on the medium, means, and context in
          which You Share the Licensed Material. For example, it may be
          reasonable to satisfy the conditions by providing a URI or
          hyperlink to a resource that includes the required
          information.

       3. If requested by the Licensor, You must remove any of the
          information required by Section 3(a)(1)(A) to the extent
          reasonably practicable.

       4. If You Share Adapted Material You produce, the Adapter's
          License You apply must not prevent recipients of the Adapted
          Material from complying with this Public License.


Section 4 -- Sui Generis Database Rights.

Where the Licensed Rights include Sui Generis Database Rights that
apply to Your use of the Licensed Material:

  a. for the avoidance of doubt, Section 2(a)(1) grants You the right
     to extract, reuse, reproduce, and Share all or a substantial
     portion of the contents of the database;

  b. if You include all or a substantial portion of the database
     contents in a database in which You have Sui Generis Database
     Rights, then the database in which You have Sui Generis Database
     Rights (but not its individual contents) is Adapted Material; and

  c. You must comply with the conditions in Section 3(a) if You Share
     all or a substantial portion of the contents of the database.

For the avoidance of doubt, this Section 4 supplements and does not
replace Your obligations under this Public License where the Licensed
Rights include other Copyright and Similar Rights.


Section 5 -- Disclaimer of Warranties and Limitation of Liability.

  a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
     EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
     AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
     ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
     IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
     WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
     PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
     ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
     KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
     ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.

  b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
     TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
     NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
     INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
     COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
     USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
     ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
     DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
     IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.

  c. The disclaimer of warranties and limitation of liability provided
     above shall be interpreted in a manner that, to the extent
     possible, most closely approximates an absolute disclaimer and
     waiver of all liability.


Section 6 -- Term and Termination.

  a. This Public License applies for the term of the Copyright and
     Similar Rights licensed here. However, if You fail to comply with
     this Public License, then Your rights under this Public License
     terminate automatically.

  b. Where Your right to use the Licensed Material has terminated under
     Section 6(a), it reinstates:

       1. automatically as of the date the violation is cured, provided
          it is cured within 30 days of Your discovery of the
          violation; or

       2. upon express reinstatement by the Licensor.

     For the avoidance of doubt, this Section 6(b) does not affect any
     right the Licensor may have to seek remedies for Your violations
     of this Public License.

  c. For the avoidance of doubt, the Licensor may also offer the
     Licensed Material under separate terms or conditions or stop
     distributing the Licensed Material at any time; however, doing so
     will not terminate this Public License.

  d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
     License.


Section 7 -- Other Terms and Conditions.

  a. The Licensor shall not be bound by any additional or different
     terms or conditions communicated by You unless expressly agreed.

  b. Any arrangements, understandings, or agreements regarding the
     Licensed Material not stated herein are separate from and
     independent of the terms and conditions of this Public License.


Section 8 -- Interpretation.

  a. For the avoidance of doubt, this Public License does not, and
     shall not be interpreted to, reduce, limit, restrict, or impose
     conditions on any use of the Licensed Material that could lawfully
     be made without permission under this Public License.

  b. To the extent possible, if any provision of this Public License is
     deemed unenforceable, it shall be automatically reformed to the
     minimum extent necessary to make it enforceable. If the provision
     cannot be reformed, it shall be severed from this Public License
     without affecting the enforceability of the remaining terms and
     conditions.

  c. No term or condition of this Public License will be waived and no
     failure to comply consented to unless expressly agreed to by the
     Licensor.

  d. Nothing in this Public License constitutes or may be interpreted
     as a limitation upon, or waiver of, any privileges and immunities
     that apply to the Licensor or You, including from the legal
     processes of any jurisdiction or authority.


=======================================================================

Creative Commons is not a party to its public
licenses. Notwithstanding, Creative Commons may elect to apply one of
its public licenses to material it publishes and in those instances
will be considered the “Licensor.” The text of the Creative Commons
public licenses is dedicated to the public domain under the CC0 Public
Domain Dedication. Except for the limited purpose of indicating that
material is shared under a Creative Commons public license or as
otherwise permitted by the Creative Commons policies published at
creativecommons.org/policies, Creative Commons does not authorize the
use of the trademark "Creative Commons" or any other trademark or logo
of Creative Commons without its prior written consent including,
without limitation, in connection with any unauthorized modifications
to any of its public licenses or any other arrangements,
understandings, or agreements concerning use of licensed material. For
the avoidance of doubt, this paragraph does not form part of the
public licenses.

Creative Commons may be contacted at creativecommons.org.
vendor/github.com/containerd/containerd/MAINTAINERS (generated, vendored, normal file, 251 lines)
@@ -0,0 +1,251 @@
|||
# containerd project maintainers file
#
# This file describes who runs the containerd project and how.
# This is a living document - if you see something out of date or missing,
# speak up!
#
# It is structured to be consumable by both humans and programs.
# To extract its contents programmatically, use any TOML-compliant
# parser.

[Rules]

[Rules.maintainers]

title = "What is a maintainer?"

text = """
There are different types of maintainers, with different responsibilities, but
all maintainers have 3 things in common:

1) They share responsibility in the project's success.
2) They have made a long-term, recurring time investment to improve the project.
3) They spend that time doing whatever needs to be done, not necessarily what
is the most interesting or fun.

Maintainers are often under-appreciated, because their work is harder to appreciate.
It's easy to appreciate a really cool and technically advanced feature. It's harder
to appreciate the absence of bugs, the slow but steady improvement in stability,
or the reliability of a release process. But those things distinguish a good
project from a great one.
"""

[Rules.adding-maintainers]

title = "How are maintainers added?"

text = """
Maintainers are first and foremost contributors that have shown they are
committed to the long term success of a project. Contributors wanting to become
maintainers are expected to be deeply involved in contributing code, pull
request review, and triage of issues in the project for more than three months.

Just contributing does not make you a maintainer; it is about building trust
with the current maintainers of the project and being a person that they can
depend on and trust to make decisions in the best interest of the project.

Periodically, the existing maintainers curate a list of contributors that have
shown regular activity on the project over the prior months. From this list,
maintainer candidates are selected and proposed on the maintainers mailing list.

After a candidate has been announced on the maintainers mailing list, the
existing maintainers are given five business days to discuss the candidate,
raise objections and cast their vote. Candidates must be approved by at least
66% of the current maintainers by adding their vote on the mailing
list. Only maintainers of the repository that the candidate is proposed for are
allowed to vote.

If a candidate is approved, a maintainer will contact the candidate to invite
the candidate to open a pull request that adds the contributor to the
MAINTAINERS file. The candidate becomes a maintainer once the pull request is
merged.
"""

[Rules.adding-sub-projects]

title = "How are sub projects added?"

text = """
Similar to adding maintainers, new sub projects can be added to the containerd
GitHub organization as long as they adhere to the CNCF
[charter](https://www.cncf.io/about/charter/) and mission. After a project
proposal has been announced on a public forum (GitHub issue or mailing list),
the existing maintainers are given five business days to discuss the new
project, raise objections and cast their vote. Projects must be approved by at
least 66% of the current maintainers by adding their vote.

If a project is approved, a maintainer will add the project to the containerd
GitHub organization, and make an announcement on a public forum.
"""

[Rules.stepping-down-policy]

title = "Stepping down policy"

text = """
Life priorities, interests, and passions can change. If you're a maintainer but
feel you must remove yourself from the list, inform other maintainers that you
intend to step down, and if possible, help find someone to pick up your work.
At the very least, ensure your work can be continued where you left off.

After you've informed other maintainers, create a pull request to remove
yourself from the MAINTAINERS file.
"""

[Rules.inactive-maintainers]

title = "Removal of inactive maintainers"

text = """
Similar to the procedure for adding new maintainers, existing maintainers can
be removed from the list if they do not show significant activity on the
project. Periodically, the maintainers review the list of maintainers and their
activity over the last three months.

If a maintainer has shown insufficient activity over this period, a neutral
person will contact the maintainer to ask if they want to continue being
a maintainer. If the maintainer decides to step down as a maintainer, they
open a pull request to be removed from the MAINTAINERS file.

If the maintainer wants to remain a maintainer but is unable to perform the
required duties, they can be removed with a vote of at least 66% of
the current maintainers. An e-mail is sent to the
mailing list, inviting maintainers of the project to vote. The voting period is
five business days. Issues related to a maintainer's performance should be
discussed with them among the other maintainers so that they are not surprised
by a pull request removing them.
"""

[Rules.decisions]

title = "How are decisions made?"

text = """
Short answer: EVERYTHING IS A PULL REQUEST.

containerd is an open-source project with an open design philosophy. This means
that the repository is the source of truth for EVERY aspect of the project,
including its philosophy, design, road map, and APIs. *If it's part of the
project, it's in the repo. If it's in the repo, it's part of the project.*

As a result, all decisions can be expressed as changes to the repository. An
implementation change is a change to the source code. An API change is a change
to the API specification. A philosophy change is a change to the philosophy
manifesto, and so on.

All decisions affecting containerd, big and small, follow the same 3 steps:

* Step 1: Open a pull request. Anyone can do this.

* Step 2: Discuss the pull request. Anyone can do this.

* Step 3: Merge or refuse the pull request. Who does this depends on the nature
of the pull request and which areas of the project it affects.
"""

[Rules.DCO]

title = "Helping contributors with the DCO"

text = """
The [DCO or `Sign your work`](
https://github.com/containerd/containerd/blob/master/CONTRIBUTING.md#sign-your-work)
requirement is not intended as a roadblock or speed bump.

Some containerd contributors are not as familiar with `git`, or have used a
web-based editor, and thus asking them to `git commit --amend -s` is not the best
way forward.

In this case, maintainers can update the commits based on clause (c) of the DCO.
The most trivial way for a contributor to allow the maintainer to do this is to
add a DCO signature in a pull request's comment, or a maintainer can simply
note that the change is sufficiently trivial that it does not substantially
change the existing contribution - e.g., a spelling change.

When you add someone's DCO, please also add your own to keep a log.
"""

[Rules."no direct push"]

title = "I'm a maintainer. Should I make pull requests too?"

text = """
Yes. Nobody should ever push to master directly. All changes should be
made through a pull request.
"""

[Rules.tsc]

title = "Conflict Resolution and technical disputes"

text = """
containerd defers to the [Technical Steering Committee](https://github.com/moby/tsc) for escalations and resolution on disputes for technical matters.
"""

[Rules.meta]

title = "How is this process changed?"

text = "Just like everything else: by making a pull request :)"

# Current project organization
[Org]

[Org.Maintainers]
people = [
"akihirosuda",
"crosbymichael",
"dqminh",
"dmcgowan",
"estesp",
"hqhq",
"jhowardmsft",
"mlaventure",
"stevvooe",
]

[People]

[People.akihirosuda]
Name = "Akihiro Suda"
Email = "suda.akihiro@lab.ntt.co.jp"
GitHub = "AkihiroSuda"

[People.crosbymichael]
Name = "Michael Crosby"
Email = "crosbymichael@gmail.com"
GitHub = "crosbymichael"

[People.dqminh]
Name = "Daniel, Dao Quang Minh"
Email = "dqminh89@gmail.com"
GitHub = "dqminh"

[People.dmcgowan]
Name = "Derek McGowan"
Email = "derek@mcgstyle.net"
GitHub = "dmcgowan"

[People.estesp]
Name = "Phil Estes"
Email = "estesp@gmail.com"
GitHub = "estesp"

[People.hqhq]
Name = "Qiang Huang"
Email = "h.huangqiang@huawei.com"
GitHub = "hqhq"

[People.jhowardmsft]
Name = "John Howard"
Email = "jhoward@microsoft.com"
GitHub = "jhowardmsft"

[People.mlaventure]
Name = "Kenfe-Mickaël Laventure"
Email = "mickael.laventure@gmail.com"
GitHub = "mlaventure"

[People.stevvooe]
Name = "Stephen Day"
Email = "stephen.day@docker.com"
GitHub = "stevvooe"
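Since the file above is plain TOML, the maintainer list can be pulled out programmatically, as its header suggests. A minimal sketch, assuming the third-party `github.com/BurntSushi/toml` package (any TOML-compliant parser works equally well):

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

// maintainersFile mirrors only the [Org.Maintainers] table of MAINTAINERS;
// all other tables in the file are ignored by the decoder.
type maintainersFile struct {
	Org struct {
		Maintainers struct {
			People []string `toml:"people"`
		} `toml:"Maintainers"`
	} `toml:"Org"`
}

func main() {
	var m maintainersFile
	if _, err := toml.DecodeFile("MAINTAINERS", &m); err != nil {
		log.Fatal(err)
	}
	// print each maintainer's handle, one per line
	for _, p := range m.Org.Maintainers.People {
		fmt.Println(p)
	}
}
```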
197
vendor/github.com/containerd/containerd/Makefile
generated
vendored
Normal file
@ -0,0 +1,197 @@
# Root directory of the project (absolute path).
ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST))))

# Base path used to install.
DESTDIR=/usr/local

# Used to populate variables in version package.
VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always)
REVISION=$(shell git rev-parse HEAD)$(shell if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi)

ifneq "$(strip $(shell command -v go 2>/dev/null))" ""
GOOS ?= $(shell go env GOOS)
GOARCH ?= $(shell go env GOARCH)
else
GOOS ?= $$GOOS
GOARCH ?= $$GOARCH
endif

WHALE = "🇩"
ONI = "👹"

RELEASE=containerd-$(VERSION:v%=%).${GOOS}-${GOARCH}

PKG=github.com/containerd/containerd

# Project packages.
PACKAGES=$(shell go list ./... | grep -v /vendor/)
INTEGRATION_PACKAGE=${PKG}
TEST_REQUIRES_ROOT_PACKAGES=$(filter \
	${PACKAGES}, \
	$(shell \
	for f in $$(git grep -l testutil.RequiresRoot | grep -v Makefile); do \
		d="$$(dirname $$f)"; \
		[ "$$d" = "." ] && echo "${PKG}" && continue; \
		echo "${PKG}/$$d"; \
	done | sort -u) \
	)

# Project binaries.
COMMANDS=ctr containerd containerd-stress containerd-release
BINARIES=$(addprefix bin/,$(COMMANDS))

GO_TAGS=$(if $(BUILDTAGS),-tags "$(BUILDTAGS)",)
GO_LDFLAGS=-ldflags '-s -w -X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PKG) $(EXTRA_LDFLAGS)'
SHIM_GO_LDFLAGS=-ldflags '-s -w -X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PKG) -extldflags "-static"'

TESTFLAGS_RACE=
GO_GCFLAGS=

# Detect the target OS.
include Makefile.OS
# Include the platform-specific makefile.
include Makefile.$(target_os)

# Flags passed to `go test`
TESTFLAGS ?= -v $(TESTFLAGS_RACE)
TESTFLAGS_PARALLEL ?= 8

.PHONY: clean all AUTHORS fmt vet lint dco build binaries test integration generate protos check-protos coverage ci check help install uninstall vendor release
.DEFAULT: default

all: binaries

check: proto-fmt ## run all linters
	@echo "$(WHALE) $@"
	gometalinter --config .gometalinter.json ./...

ci: check binaries check-protos coverage coverage-integration ## to be used by the CI

AUTHORS: .mailmap .git/HEAD
	git log --format='%aN <%aE>' | sort -fu > $@

generate: protos
	@echo "$(WHALE) $@"
	@PATH=${ROOTDIR}/bin:${PATH} go generate -x ${PACKAGES}

protos: bin/protoc-gen-gogoctrd ## generate protobuf
	@echo "$(WHALE) $@"
	@PATH=${ROOTDIR}/bin:${PATH} protobuild --quiet ${PACKAGES}

check-protos: protos ## check if protobufs need to be generated again
	@echo "$(WHALE) $@"
	@test -z "$$(git status --short | grep ".pb.go" | tee /dev/stderr)" || \
		((git diff | cat) && \
		(echo "$(ONI) please run 'make protos' when making changes to proto files" && false))

check-api-descriptors: protos ## check that protobuf changes aren't present.
	@echo "$(WHALE) $@"
	@test -z "$$(git status --short | grep ".pb.txt" | tee /dev/stderr)" || \
		((git diff $$(find . -name '*.pb.txt') | cat) && \
		(echo "$(ONI) please run 'make protos' when making changes to proto files and check-in the generated descriptor file changes" && false))

proto-fmt: ## check format of proto files
	@echo "$(WHALE) $@"
	@test -z "$$(find . -path ./vendor -prune -o -path ./protobuf/google/rpc -prune -o -name '*.proto' -type f -exec grep -Hn -e "^ " {} \; | tee /dev/stderr)" || \
		(echo "$(ONI) please indent proto files with tabs only" && false)
	@test -z "$$(find . -path ./vendor -prune -o -name '*.proto' -type f -exec grep -Hn "Meta meta = " {} \; | grep -v '(gogoproto.nullable) = false' | tee /dev/stderr)" || \
		(echo "$(ONI) meta fields in proto files must have option (gogoproto.nullable) = false" && false)

dco: ## dco check
	@which git-validation > /dev/null 2>/dev/null || (echo "ERROR: git-validation not found" && false)
ifdef TRAVIS_COMMIT_RANGE
	git-validation -q -run DCO,short-subject,dangling-whitespace
else
	git-validation -v -run DCO,short-subject,dangling-whitespace -range $(EPOCH_TEST_COMMIT)..HEAD
endif

build: ## build the go packages
	@echo "$(WHALE) $@"
	@go build ${EXTRA_FLAGS} ${GO_LDFLAGS} ${GO_GCFLAGS} ${PACKAGES}

test: ## run tests, except integration tests and tests that require root
	@echo "$(WHALE) $@"
	@go test ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES})

root-test: ## run tests that require root, except integration tests
	@echo "$(WHALE) $@"
	@go test ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${TEST_REQUIRES_ROOT_PACKAGES}) -test.root

integration: ## run integration tests
	@echo "$(WHALE) $@"
	@go test ${TESTFLAGS} -test.root -parallel ${TESTFLAGS_PARALLEL}

benchmark: ## run benchmark tests
	@echo "$(WHALE) $@"
	@go test ${TESTFLAGS} -bench . -run Benchmark -test.root

FORCE:

# Build a binary from a cmd.
bin/%: cmd/% FORCE
	@echo "$(WHALE) $@${BINARY_SUFFIX}"
	@go build -o $@${BINARY_SUFFIX} ${GO_LDFLAGS} ${GO_TAGS} ${GO_GCFLAGS} ./$<

bin/containerd-shim: cmd/containerd-shim FORCE # set !cgo and omit pie for a static shim build: https://github.com/golang/go/issues/17789#issuecomment-258542220
	@echo "$(WHALE) bin/containerd-shim"
	@CGO_ENABLED=0 go build -o bin/containerd-shim ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/containerd-shim

binaries: $(BINARIES) ## build binaries
	@echo "$(WHALE) $@"

release: $(BINARIES)
	@echo "$(WHALE) $@"
	@rm -rf releases/$(RELEASE) releases/$(RELEASE).tar.gz
	@install -d releases/$(RELEASE)/bin
	@install $(BINARIES) releases/$(RELEASE)/bin
	@cd releases/$(RELEASE) && tar -czf ../$(RELEASE).tar.gz *

clean: ## clean up binaries
	@echo "$(WHALE) $@"
	@rm -f $(BINARIES)

install: ## install binaries
	@echo "$(WHALE) $@ $(BINARIES)"
	@mkdir -p $(DESTDIR)/bin
	@install $(BINARIES) $(DESTDIR)/bin

uninstall:
	@echo "$(WHALE) $@"
	@rm -f $(addprefix $(DESTDIR)/bin/,$(notdir $(BINARIES)))


coverage: ## generate coverprofiles from the unit tests, except tests that require root
	@echo "$(WHALE) $@"
	@rm -f coverage.txt
	@go test -i ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) 2> /dev/null
	@( for pkg in $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}); do \
		go test ${TESTFLAGS} \
			-cover \
			-coverprofile=profile.out \
			-covermode=atomic $$pkg || exit; \
		if [ -f profile.out ]; then \
			cat profile.out >> coverage.txt; \
			rm profile.out; \
		fi; \
	done )

root-coverage: ## generate coverage profiles for unit tests that require root
	@echo "$(WHALE) $@"
	@go test -i ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${TEST_REQUIRES_ROOT_PACKAGES}) 2> /dev/null
	@( for pkg in $(filter-out ${INTEGRATION_PACKAGE},${TEST_REQUIRES_ROOT_PACKAGES}); do \
		go test ${TESTFLAGS} \
			-cover \
			-coverprofile=profile.out \
			-covermode=atomic $$pkg -test.root || exit; \
		if [ -f profile.out ]; then \
			cat profile.out >> coverage.txt; \
			rm profile.out; \
		fi; \
	done )

vendor:
	@echo "$(WHALE) $@"
	@vndr

help: ## this help
	@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) | sort
16
vendor/github.com/containerd/containerd/Makefile.OS
generated
vendored
Normal file
@ -0,0 +1,16 @@
# Detect the OS and set target_os to the same value as GOOS
target_os =
ifeq ($(OS),Windows_NT)
	target_os = windows
else
	UNAME_S := $(shell uname -s)
	ifeq ($(UNAME_S),Linux)
		target_os = linux
	endif
	ifeq ($(UNAME_S),Darwin)
		target_os = darwin
	endif
	ifeq ($(UNAME_S),FreeBSD)
		target_os = freebsd
	endif
endif
7
vendor/github.com/containerd/containerd/Makefile.darwin
generated
vendored
Normal file
@ -0,0 +1,7 @@
#darwin specific settings
COMMANDS += containerd-shim

# amd64 supports go test -race
ifeq ($(GOARCH),amd64)
	TESTFLAGS_RACE= -race
endif
7
vendor/github.com/containerd/containerd/Makefile.freebsd
generated
vendored
Normal file
@ -0,0 +1,7 @@
#freebsd specific settings
COMMANDS += containerd-shim

# amd64 supports go test -race
ifeq ($(GOARCH),amd64)
	TESTFLAGS_RACE= -race
endif
12
vendor/github.com/containerd/containerd/Makefile.linux
generated
vendored
Normal file
@ -0,0 +1,12 @@
#linux specific settings
COMMANDS += containerd-shim

# check GOOS for cross compile builds
ifeq ($(GOOS),linux)
	GO_GCFLAGS= -buildmode=pie
endif

# amd64 supports go test -race
ifeq ($(GOARCH),amd64)
	TESTFLAGS_RACE= -race
endif
10
vendor/github.com/containerd/containerd/Makefile.windows
generated
vendored
Normal file
@ -0,0 +1,10 @@
#Windows specific settings.
WHALE = "+"
ONI = "-"

BINARY_SUFFIX=".exe"

# amd64 supports go test -race
ifeq ($(GOARCH),amd64)
	TESTFLAGS_RACE= -race
endif
16
vendor/github.com/containerd/containerd/NOTICE
generated
vendored
Normal file
@ -0,0 +1,16 @@
Docker
Copyright 2012-2015 Docker, Inc.

This product includes software developed at Docker, Inc. (https://www.docker.com).

The following is courtesy of our legal counsel:


Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.

For more information, please see https://www.bis.doc.gov

See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
67
vendor/github.com/containerd/containerd/Protobuild.toml
generated
vendored
Normal file
@ -0,0 +1,67 @@
version = "unstable"
|
||||
generator = "gogoctrd"
|
||||
plugins = ["grpc", "fieldpath"]
|
||||
|
||||
# Control protoc include paths. Below are usually some good defaults, but feel
|
||||
# free to try it without them if it works for your project.
|
||||
[includes]
|
||||
# Include paths that will be added before all others. Typically, you want to
|
||||
# treat the root of the project as an include, but this may not be necessary.
|
||||
before = ["./protobuf"]
|
||||
|
||||
# Paths that should be treated as include roots in relation to the vendor
|
||||
# directory. These will be calculated with the vendor directory nearest the
|
||||
# target package.
|
||||
packages = ["github.com/gogo/protobuf"]
|
||||
|
||||
# Paths that will be added untouched to the end of the includes. We use
|
||||
# `/usr/local/include` to pickup the common install location of protobuf.
|
||||
# This is the default.
|
||||
after = ["/usr/local/include"]
|
||||
|
||||
# This section maps protobuf imports to Go packages. These will become
|
||||
# `-M` directives in the call to the go protobuf generator.
|
||||
[packages]
|
||||
"gogoproto/gogo.proto" = "github.com/gogo/protobuf/gogoproto"
|
||||
"google/protobuf/any.proto" = "github.com/gogo/protobuf/types"
|
||||
"google/protobuf/empty.proto" = "github.com/gogo/protobuf/types"
|
||||
"google/protobuf/descriptor.proto" = "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
|
||||
"google/protobuf/field_mask.proto" = "github.com/gogo/protobuf/types"
|
||||
"google/protobuf/timestamp.proto" = "github.com/gogo/protobuf/types"
|
||||
"google/protobuf/duration.proto" = "github.com/gogo/protobuf/types"
|
||||
"google/rpc/status.proto" = "github.com/containerd/containerd/protobuf/google/rpc"
|
||||
|
||||
[[overrides]]
|
||||
prefixes = ["github.com/containerd/containerd/api/events"]
|
||||
plugins = ["fieldpath"] # disable grpc for this package
|
||||
|
||||
[[overrides]]
|
||||
# enable ttrpc and disable fieldpath and grpc for the shim
|
||||
prefixes = ["github.com/containerd/containerd/linux/shim/v1"]
|
||||
plugins = ["ttrpc"]
|
||||
|
||||
# Aggregrate the API descriptors to lock down API changes.
|
||||
[[descriptors]]
|
||||
prefix = "github.com/containerd/containerd/api"
|
||||
target = "api/next.pb.txt"
|
||||
ignore_files = [
|
||||
"google/protobuf/descriptor.proto",
|
||||
"gogoproto/gogo.proto"
|
||||
]
|
||||
|
||||
# Lock down runc config
|
||||
[[descriptors]]
|
||||
prefix = "github.com/containerd/containerd/linux/runctypes"
|
||||
target = "linux/runctypes/next.pb.txt"
|
||||
ignore_files = [
|
||||
"google/protobuf/descriptor.proto",
|
||||
"gogoproto/gogo.proto"
|
||||
]
|
||||
|
||||
[[descriptors]]
|
||||
prefix = "github.com/containerd/containerd/windows/hcsshimtypes"
|
||||
target = "windows/hcsshimtypes/next.pb.txt"
|
||||
ignore_files = [
|
||||
"google/protobuf/descriptor.proto",
|
||||
"gogoproto/gogo.proto"
|
||||
]
|
215
vendor/github.com/containerd/containerd/README.md
generated
vendored
Normal file
@ -0,0 +1,215 @@
![containerd logo]()

[![GoDoc](https://godoc.org/github.com/containerd/containerd?status.svg)](https://godoc.org/github.com/containerd/containerd)
[![Build Status](https://travis-ci.org/containerd/containerd.svg?branch=master)](https://travis-ci.org/containerd/containerd)
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcontainerd%2Fcontainerd.svg?type=shield)](https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcontainerd%2Fcontainerd?ref=badge_shield)
[![Go Report Card](https://goreportcard.com/badge/github.com/containerd/containerd)](https://goreportcard.com/report/github.com/containerd/containerd)

containerd is an industry-standard container runtime with an emphasis on simplicity, robustness and portability. It is available as a daemon for Linux and Windows, which can manage the complete container lifecycle of its host system: image transfer and storage, container execution and supervision, low-level storage and network attachments, etc.

containerd is designed to be embedded into a larger system, rather than being used directly by developers or end-users.

![containerd architecture]()

## Getting Started

See our documentation on [containerd.io](https://containerd.io):
* [for ops and admins](docs/ops.md)
* [namespaces](docs/namespaces.md)
* [client options](docs/client-opts.md)

See how to build containerd from source at [BUILDING](BUILDING.md).

If you are interested in trying out containerd, see our example at [Getting Started](docs/getting-started.md).


## Runtime Requirements

Runtime requirements for containerd are very minimal. Most interactions with
the Linux and Windows container feature sets are handled via [runc](https://github.com/opencontainers/runc) and/or
OS-specific libraries (e.g. [hcsshim](https://github.com/Microsoft/hcsshim) for Microsoft). The current required version of `runc` is always listed in [RUNC.md](/RUNC.md).

There are specific features
used by containerd core code and snapshotters that will require a minimum kernel
version on Linux. With the understood caveat of distro kernel versioning, a
reasonable starting point for Linux is a minimum 4.x kernel version.

The overlay filesystem snapshotter, used by default, uses features that were
finalized in the 4.x kernel series. If you choose to use btrfs, there may
be more flexibility in kernel version (minimum recommended is 3.18), but it will
require the btrfs kernel module and btrfs tools to be installed on your Linux
distribution.

To use Linux checkpoint and restore features, you will need `criu` installed on
your system. See more details in [Checkpoint and Restore](#checkpoint-and-restore).

Build requirements for developers are listed in [BUILDING](BUILDING.md).

## Features

### Client

containerd offers a full client package to help you integrate containerd into your platform.

```go
import (
	"log"

	"github.com/containerd/containerd"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}
```

### Namespaces

Namespaces allow multiple consumers to use the same containerd without conflicting with each other. They have the benefit of sharing content while still keeping containers and images separate.

To set a namespace for requests to the API:

```go
ctx := context.Background()
// create a context for docker
docker := namespaces.WithNamespace(ctx, "docker")

container, err := client.NewContainer(docker, "id")
```

To set a default namespace on the client:

```go
client, err := containerd.New(address, containerd.WithDefaultNamespace("docker"))
```

### Distribution

```go
// pull an image
image, err := client.Pull(context, "docker.io/library/redis:latest")

// push an image
err = client.Push(context, "docker.io/library/redis:latest", image.Target())
```

### Containers

In containerd, a container is a metadata object. Resources such as an OCI runtime specification, image, root filesystem, and other metadata can be attached to a container.

```go
redis, err := client.NewContainer(context, "redis-master")
defer redis.Delete(context)
```

### OCI Runtime Specification

containerd fully supports the OCI runtime specification for running containers. We have built-in functions to help you generate runtime specifications based on images as well as custom parameters.

You can specify options when creating a container that control how the specification is modified.

```go
redis, err := client.NewContainer(context, "redis-master", containerd.WithNewSpec(oci.WithImageConfig(image)))
```

### Root Filesystems

containerd allows you to use overlay or snapshot filesystems with your containers. It comes with built-in support for overlayfs and btrfs.

```go
// pull an image and unpack it into the configured snapshotter
image, err := client.Pull(context, "docker.io/library/redis:latest", containerd.WithPullUnpack)

// allocate a new RW root filesystem for a container based on the image
redis, err := client.NewContainer(context, "redis-master",
	containerd.WithNewSnapshot("redis-rootfs", image),
	containerd.WithNewSpec(oci.WithImageConfig(image)),
)

// use a readonly filesystem with multiple containers
for i := 0; i < 10; i++ {
	id := fmt.Sprintf("id-%d", i)
	container, err := client.NewContainer(ctx, id,
		containerd.WithNewSnapshotView(id, image),
		containerd.WithNewSpec(oci.WithImageConfig(image)),
	)
}
```

### Tasks

Taking a container object and turning it into a runnable process on a system is done by creating a new `Task` from the container. A task represents the runnable object within containerd.

```go
// create a new task
task, err := redis.NewTask(context, cio.Stdio)
defer task.Delete(context)

// the new task has a pid that can be used to set up networking
// or other runtime settings outside of containerd
pid := task.Pid()

// start the redis-server process inside the container
err = task.Start(context)

// wait for the task to exit and get the exit status
status, err := task.Wait(context)
```

### Checkpoint and Restore

If you have [criu](https://criu.org/Main_Page) installed on your machine you can checkpoint and restore containers and their tasks. This allows you to clone and/or live migrate containers to other machines.

```go
// checkpoint the task then push it to a registry
checkpoint, err := task.Checkpoint(context, containerd.WithExit)

err = client.Push(context, "myregistry/checkpoints/redis:master", checkpoint)

// on a new machine pull the checkpoint and restore the redis container
image, err := client.Pull(context, "myregistry/checkpoints/redis:master")

checkpoint := image.Target()

redis, err = client.NewContainer(context, "redis-master", containerd.WithCheckpoint(checkpoint, "redis-rootfs"))
defer redis.Delete(context)

task, err = redis.NewTask(context, cio.Stdio, containerd.WithTaskCheckpoint(checkpoint))
defer task.Delete(context)

err = task.Start(context)
```

### Releases and API Stability

Please see [RELEASES.md](RELEASES.md) for details on versioning and stability
of containerd components.

### Development reports

Weekly summaries of progress and what is being worked on:
https://github.com/containerd/containerd/tree/master/reports

### Communication

For async communication and long-running discussions please use issues and pull requests on the GitHub repo.
This will be the best place to discuss design and implementation.

For sync communication we have a community Slack with a #containerd channel that everyone is welcome to join and chat about development.

**Slack:** https://dockr.ly/community

### Reporting security issues

__If you are reporting a security issue, please reach out discreetly at security@containerd.io__.

## Licenses

The containerd codebase is released under the [Apache 2.0 license](LICENSE.code).
The README.md file, and files in the "docs" folder are licensed under the
Creative Commons Attribution 4.0 International License under the terms and
conditions set forth in the file "[LICENSE.docs](LICENSE.docs)". You may obtain a duplicate
copy of the same license, titled CC-BY-4.0, at http://creativecommons.org/licenses/by/4.0/.
272
vendor/github.com/containerd/containerd/RELEASES.md
generated
vendored
Normal file
@ -0,0 +1,272 @@
# Versioning and Release

This document details the versioning and release plan for containerd. Stability
is a top goal for this project, and we hope that this document and the processes
it entails will help to achieve that. It covers the release process, version
numbering, backporting, API stability and support horizons.

If you rely on containerd, it would be good to spend time understanding the
areas of the API that are and are not supported and how they impact your
project in the future.

This document will be considered a living document. Supported timelines,
backport targets and API stability guarantees will be updated here as they
change.

If there is something that you require or this document leaves out, please
reach out by [filing an issue](https://github.com/containerd/containerd/issues).

## Releases

Releases of containerd will be versioned using dotted triples, similar to
[Semantic Version](http://semver.org/). For the purposes of this document, we
will refer to the respective components of this triple as
`<major>.<minor>.<patch>`. The version number may have additional information,
such as alpha, beta and release candidate qualifications. Such releases will be
considered "pre-releases".

### Major and Minor Releases

Major and minor releases of containerd will be made from master. Releases of
containerd will be marked with GPG signed tags and announced at
https://github.com/containerd/containerd/releases. The tag will be of the
format `v<major>.<minor>.<patch>` and should be made with the command `git tag
-s v<major>.<minor>.<patch>`.

After a minor release, a branch will be created, with the format
`release/<major>.<minor>` from the minor tag. All further patch releases will
be done from that branch. For example, once we release `v1.0.0`, a branch
`release/1.0` will be created from that tag. All future patch releases will be
done against that branch.

### Pre-releases

Pre-releases, such as alphas, betas and release candidates will be conducted
from their source branch. For major and minor releases, these releases will be
done from master. For patch releases, these pre-releases should be done within
the corresponding release branch.

While pre-releases are done to assist in the stabilization process, no
guarantees are provided.

### Upgrade Path

The upgrade path for containerd is such that the 0.0.x patch releases are
always backward compatible with their major and minor version. Minor (0.x.0)
versions will always be compatible with the previous minor release, i.e. 1.2.0
is backwards compatible with 1.1.0 and 1.1.0 is compatible with 1.0.0. There are
no compatibility guarantees for upgrades that span multiple _minor_ releases.
For example, 1.0.0 to 1.2.0 is not supported. One should first upgrade to 1.1,
then 1.2.

There are no compatibility guarantees with upgrades to _major_ versions. For
example, upgrading from 1.0.0 to 2.0.0 may require resources to be migrated or
integrations to change. Each major version will be supported for at least 1
year with bug fixes and security patches.

### Next Release

The activity for the next release will be tracked in the
[milestones](https://github.com/containerd/containerd/milestones). If your
issue or PR is not present in a milestone, please reach out to the maintainers
to create the milestone or add an issue or PR to an existing milestone.

### Support Horizon

Support horizons will be defined corresponding to a release branch, identified
by `<major>.<minor>`. Release branches will be in one of several states:

- __*Next*__: The next planned release branch.
- __*Active*__: The release is currently supported and accepting patches.
- __*End of Life*__: The release branch is no longer supported and no new patches will be accepted.

Releases will be supported up to one year after a _minor_ release. This means that
we will accept bug reports and backports to release branches until the end of
life date. If no new _minor_ release has been made, that release will be
considered supported until the next _minor_ is released or one year, whichever
is longer.

The current state is available in the following table:

| Release | Status | Start | End of Life |
|---------|-------------|------------------|-------------------|
| [0.0](https://github.com/containerd/containerd/releases/tag/0.0.5) | End of Life | Dec 4, 2015 | - |
| [0.1](https://github.com/containerd/containerd/releases/tag/v0.1.0) | End of Life | Mar 21, 2016 | - |
| [0.2](https://github.com/containerd/containerd/tree/v0.2.x) | End of Life | Apr 21, 2016 | December 5, 2017 |
| [1.0](https://github.com/containerd/containerd/releases/tag/v1.0.0) | Active | December 5, 2017 | max(December 5, 2018, release of 1.1.0) |
| [1.1](https://github.com/containerd/containerd/milestone/15) | Next | TBD | max(TBD+1 year, release of 1.2.0) |

Note that branches and releases from before 1.0 may not follow these rules.

This table should be updated as part of the release preparation process.

### Backporting

Backports in containerd are community driven. As maintainers, we'll try to
ensure that sensible bugfixes make it into _active_ releases, but our main focus
will be features for the next _minor_ or _major_ release. For the most part,
this process is straightforward, and we are here to help make it as smooth as
possible.

If there are important fixes that need to be backported, please let us know in
one of three ways:

1. Open an issue.
2. Open a PR with a cherry-picked change from master.
3. Open a PR with a ported fix.

__If you are reporting a security issue, please reach out discreetly at security@containerd.io__.
Remember that backported PRs must follow the versioning guidelines from this document.

Any release that is "active" can accept backports. Opening a backport PR is
fairly straightforward. The steps differ depending on whether you are pulling
a fix from master or need to draft a new commit specific to a particular
branch.

To cherry-pick a straightforward commit from master, simply use the cherry-pick
process:

1. Pick the branch to which you want backported, usually in the format
   `release/<major>.<minor>`. The following will create a branch you can
   use to open a PR:

   ```console
   $ git checkout -b my-backport-branch release/<major>.<minor>
   ```

2. Find the commit you want backported.
3. Apply it to the release branch:

   ```console
   $ git cherry-pick -xsS <commit>
   ```
4. Push the branch and open up a PR against the _release branch_:

   ```console
   $ git push -u stevvooe my-backport-branch
   ```

   Make sure to replace `stevvooe` with whatever fork you are using to open
   the PR. When you open the PR, make sure to replace `master` with whatever
   release branch you are targeting with the fix.

If there is no existing fix in master, you should first fix the bug in master,
or ask a maintainer or contributor to do it via an issue. Once that PR is
completed, open a PR using the process above.

Only when the bug is not seen in master and the fix must be made specifically
for the release branch should you open a PR with new code.

## Public API Stability

The following table provides an overview of the components covered by
containerd versions:


| Component | Status | Stabilized Version | Links |
|---------------|----------|-------------------|---------------|
| GRPC API | Beta | 1.0 | [api/](api) |
| Metrics API | Beta | 1.0 | - |
| Go client API | Unstable | 1.1 tentative | [godoc](https://godoc.org/github.com/containerd/containerd) |
| `ctr` tool | Unstable | Out of scope | - |

From the version stated in the above table, that component must adhere to the
stability constraints expected in release versions.

Unless explicitly stated here, components that are called out as unstable or
not covered may change in a future minor version. Breaking changes to
"unstable" components will be avoided in patch versions.

### GRPC API

The primary product of containerd is the GRPC API. As of the 1.0.0 release, the
GRPC API will not have any backwards incompatible changes without a _major_
version jump.

To ensure compatibility, we have collected the entire GRPC API symbol set into
a single file. At each _minor_ release of containerd, we will move the current
`next.pb.txt` file to a file named for the minor version, such as `1.0.pb.txt`,
enumerating the supported services and messages. See [api/](api) for details.

Note that new services may be added in _minor_ releases. New service methods
and new fields on messages may be added if they are optional.

### Metrics API

The metrics API that outputs Prometheus-style metrics will be versioned independently,
prefixed with the API version, i.e. `/v1/metrics`, `/v2/metrics`.

The metrics API version will be incremented when breaking changes are made to the Prometheus
output. New metrics can be added to the output in a backwards compatible manner without
bumping the API version.
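As a quick illustration, scraping the versioned endpoint is an ordinary HTTP GET. A minimal sketch; the address and port are assumptions and must match the metrics address configured for your containerd daemon:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	// Assumed address: whatever metrics address the daemon is configured
	// to serve on. The /v1/ prefix is the versioned API path.
	resp, err := http.Get("http://127.0.0.1:1338/v1/metrics")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	// Prometheus text exposition format, one metric per line.
	fmt.Printf("%s", body)
}
```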

### Plugins API

containerd is based on a modular design where plugins are implemented to provide the core functionality.
Plugins implemented in tree are supported by the containerd community unless explicitly specified as non-stable.
Out of tree plugins are not supported by the containerd maintainers.

Currently, the Windows runtime and snapshot plugins are not stable and not supported.
Please refer to the GitHub milestones for Windows support in a future release.

#### Error Codes

Error codes will not change in a patch release, unless a missing error code
causes a blocking bug. Error codes of type "unknown" may change to more
specific types in the future. Any error code that is not "unknown" that is
currently returned by a service will not change without a _major_ release or a
new version of the service.

If you find that an error code that is required by your application is not
well-documented in the protobuf service description or tested explicitly,
please file an issue and we will clarify.

#### Opaque Fields

Unless explicitly stated, the formats of certain fields may not be covered by
this guarantee and should be treated opaquely. For example, don't rely on the
format details of a URL field unless we explicitly say that the field will
follow that format.

### Go client API

The Go client API, documented in
[godoc](https://godoc.org/github.com/containerd/containerd), is currently
considered unstable. It is recommended to vendor the necessary components to
stabilize your project build. Note that because the Go API interfaces with the
GRPC API, clients written against a 1.0 Go API should remain compatible with
future 1.x series releases.

We intend to stabilize the API in a future release when more integrations have
been carried out.

Any changes to the API should be detectable at compile time, so upgrading will
be a matter of fixing compilation errors and moving from there.

### `ctr` tool

The `ctr` tool provides the ability to introspect and understand the containerd
API. At this time, it is not considered a primary offering of the project. It
may be completely refactored or have breaking changes in _minor_ releases.

We will try not to break the tool in _patch_ releases.

### Not Covered

As a general rule, anything not mentioned in this document is not covered by
the stability guidelines and may change in any release. Explicitly, this
pertains to this non-exhaustive list of components:

- File System layout
- Storage formats
- Snapshot formats

Between upgrades of subsequent _minor_ versions, we may migrate these formats.
Any outside processes relying on details of these file system layouts may break
in that process. Container root file systems will be maintained on upgrade.

### Exceptions

We may make exceptions in the interest of __security patches__. If a break is
required, it will be communicated clearly and the solution will be considered
against total impact.
3
vendor/github.com/containerd/containerd/ROADMAP.md
generated
vendored
Normal file
@ -0,0 +1,3 @@
# containerd Roadmap

Please review the milestones on [github](https://github.com/containerd/containerd/milestones) for the updated roadmap and release information.
27
vendor/github.com/containerd/containerd/RUNC.md
generated
vendored
Normal file
@ -0,0 +1,27 @@
containerd is built with OCI support and with support for advanced features provided by [runc](https://github.com/opencontainers/runc).

We depend on a specific `runc` version when dealing with advanced features. You should have a specific runc build for development. The current supported runc commit is:

RUNC_COMMIT = 9f9c96235cc97674e935002fc3d78361b696a69e

For more information on how to clone and build runc see the runc Building [documentation](https://github.com/opencontainers/runc#building).

Note: before building you may need to install additional support, which will vary by platform. For example, you may need to install `libseccomp` and `libapparmor` e.g. `libseccomp-dev` and `libapparmor-dev` for Ubuntu.

## building

From within your `opencontainers/runc` repository run:

### apparmor

```bash
make BUILDTAGS='seccomp apparmor' && sudo make install
```

### selinux

```bash
make BUILDTAGS='seccomp selinux' && sudo make install
```

After an official runc release we will start pinning containerd support to a specific version, but various development and testing features may require a newer runc version than the latest release. If you encounter any runtime errors, please make sure your runc is in sync with the commit/tag provided in this document.
57
vendor/github.com/containerd/containerd/SCOPE.md
generated
vendored
Normal file
@ -0,0 +1,57 @@
# Scope and Principles

Having a clearly defined scope for a project is important for ensuring consistency and focus.
The following criteria will be used when reviewing pull requests, features, and changes for the project before being accepted.

### Components

Components should not have tight dependencies on each other so that they are able to be used independently.
The APIs for images and containers should be designed in a way that when used together the components have a natural flow, but still remain useful independently.

An example of this design can be seen with the overlay filesystems and the container execution layer.
The execution layer and overlay filesystems can be used independently, but if you were to use both, they share a common `Mount` struct that the filesystems produce and the execution layer consumes.
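For illustration, a simplified sketch of that shared type, assuming the shape of the `Mount` struct in containerd's `mount` package:

```go
// Package mount: a simplified sketch of the shared Mount struct.
// Snapshotters produce a set of mounts describing a prepared root
// filesystem; the execution layer consumes them to start a container.
package mount

// Mount describes a single mount to perform for a container rootfs.
type Mount struct {
	// Type of the mount, e.g. "overlay" or "bind".
	Type string
	// Source of the mount, e.g. a device or a directory.
	Source string
	// Options are the mount options, e.g. "ro", "lowerdir=...".
	Options []string
}
```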

### Primitives

containerd should expose primitives to solve problems instead of building high level abstractions in the API.
A common example of this is how build would be implemented.
Instead of having a build API in containerd, we should expose the lower level primitives that allow the things required by build to work.
Breaking up the filesystem APIs to allow snapshots, copy functionality, and mounts allows people to implement build at higher levels with more flexibility.

### Extensibility and Defaults

For the various components in containerd there should be defined extension points where implementations can be swapped for alternatives.
The best example of this is that containerd will use `runc` from OCI as the default runtime in the execution layer, but other runtimes conforming to the OCI Runtime specification can be easily added to containerd.

containerd will come with a default implementation for the various components.
These defaults will be chosen by the maintainers of the project and should not change unless better tech for that component comes out.
Additional implementations will not be accepted into the core repository and should be developed in a separate repository not maintained by the containerd maintainers.


## Scope

The following table specifies the various components of containerd and general features of container runtimes.
The table specifies whether or not the feature/component is in or out of scope.

| Name | Description | In/Out | Reason |
|------|-------------|--------|--------|
| execution | Provide an extensible execution layer for executing a container | in | Create, start, stop, pause, resume, exec, signal, delete |
| cow filesystem | Built in functionality for overlay, aufs, and other copy on write filesystems for containers | in | |
| distribution | Having the ability to push and pull images as well as operations on images as a first class API object | in | containerd will fully support the management and retrieval of images |
| metrics | container-level metrics, cgroup stats, and OOM events | in | |
| networking | creation and management of network interfaces | out | Networking will be handled and provided to containerd via higher level systems. |
| build | Building images as a first class API | out | Build is a higher level tooling feature and can be implemented in many different ways on top of containerd |
| volumes | Volume management for external data | out | The API supports mounts, binds, etc where all volume type systems can be built on top of containerd. |
| logging | Persisting container logs | out | Logging can be built on top of containerd because the container's STDIO will be provided to the clients and they can persist any way they see fit. There is no io copying of container STDIO in containerd. |


containerd is scoped to a single host and makes assumptions based on that fact.
It can be used to build things like a node agent that launches containers but does not have any concepts of a distributed system.

containerd is designed to be embedded into a larger system, hence it only includes a barebones CLI (`ctr`) specifically for development and debugging purposes, with no mandate to be human-friendly, and no guarantee of interface stability over time.

### How is the scope changed?

The scope of this project is a whitelist.
If it's not mentioned as being in scope, it is out of scope.
For the scope of this project to change it requires a 100% vote from all maintainers of the project.
108
vendor/github.com/containerd/containerd/benchmark_test.go
generated
vendored
Normal file
@ -0,0 +1,108 @@
package containerd

import (
	"fmt"
	"testing"

	"github.com/containerd/containerd/containers"
	"github.com/containerd/containerd/oci"
)

func BenchmarkContainerCreate(b *testing.B) {
	client, err := newClient(b, address)
	if err != nil {
		b.Fatal(err)
	}
	defer client.Close()

	ctx, cancel := testContext()
	defer cancel()

	image, err := client.GetImage(ctx, testImage)
	if err != nil {
		b.Error(err)
		return
	}
	spec, err := oci.GenerateSpec(ctx, client, &containers.Container{ID: b.Name()}, oci.WithImageConfig(image), withTrue())
	if err != nil {
		b.Error(err)
		return
	}
	var containers []Container
	defer func() {
		for _, c := range containers {
			if err := c.Delete(ctx, WithSnapshotCleanup); err != nil {
				b.Error(err)
			}
		}
	}()

	// reset the timer before creating containers
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		id := fmt.Sprintf("%s-%d", b.Name(), i)
		container, err := client.NewContainer(ctx, id, WithSpec(spec), WithNewSnapshot(id, image))
		if err != nil {
			b.Error(err)
			return
		}
		containers = append(containers, container)
	}
	b.StopTimer()
}

func BenchmarkContainerStart(b *testing.B) {
	client, err := newClient(b, address)
	if err != nil {
		b.Fatal(err)
	}
	defer client.Close()

	ctx, cancel := testContext()
	defer cancel()

	image, err := client.GetImage(ctx, testImage)
	if err != nil {
		b.Error(err)
		return
	}
	spec, err := oci.GenerateSpec(ctx, client, &containers.Container{ID: b.Name()}, oci.WithImageConfig(image), withTrue())
	if err != nil {
		b.Error(err)
		return
	}
	var containers []Container
	defer func() {
		for _, c := range containers {
			if err := c.Delete(ctx, WithSnapshotCleanup); err != nil {
				b.Error(err)
			}
		}
	}()

	for i := 0; i < b.N; i++ {
		id := fmt.Sprintf("%s-%d", b.Name(), i)
		container, err := client.NewContainer(ctx, id, WithSpec(spec), WithNewSnapshot(id, image))
		if err != nil {
			b.Error(err)
			return
		}
		containers = append(containers, container)
	}
	// reset the timer before starting tasks
	b.ResetTimer()
	for _, c := range containers {
		task, err := c.NewTask(ctx, empty())
		if err != nil {
			b.Error(err)
			return
		}
		defer task.Delete(ctx)
		if err := task.Start(ctx); err != nil {
			b.Error(err)
			return
		}
	}
	b.StopTimer()
}
598
vendor/github.com/containerd/containerd/client.go
generated
vendored
Normal file
@ -0,0 +1,598 @@
package containerd

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"time"

	containersapi "github.com/containerd/containerd/api/services/containers/v1"
	contentapi "github.com/containerd/containerd/api/services/content/v1"
	diffapi "github.com/containerd/containerd/api/services/diff/v1"
	eventsapi "github.com/containerd/containerd/api/services/events/v1"
	imagesapi "github.com/containerd/containerd/api/services/images/v1"
	introspectionapi "github.com/containerd/containerd/api/services/introspection/v1"
	namespacesapi "github.com/containerd/containerd/api/services/namespaces/v1"
	snapshotsapi "github.com/containerd/containerd/api/services/snapshots/v1"
	"github.com/containerd/containerd/api/services/tasks/v1"
	versionservice "github.com/containerd/containerd/api/services/version/v1"
	"github.com/containerd/containerd/containers"
	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/dialer"
	"github.com/containerd/containerd/diff"
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/namespaces"
	"github.com/containerd/containerd/platforms"
	"github.com/containerd/containerd/plugin"
	"github.com/containerd/containerd/remotes"
	"github.com/containerd/containerd/remotes/docker"
	"github.com/containerd/containerd/remotes/docker/schema1"
	"github.com/containerd/containerd/snapshots"
	"github.com/containerd/typeurl"
	ptypes "github.com/gogo/protobuf/types"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	specs "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/pkg/errors"
	"google.golang.org/grpc"
	"google.golang.org/grpc/health/grpc_health_v1"
)

func init() {
	const prefix = "types.containerd.io"
	// register TypeUrls for commonly marshaled external types
	major := strconv.Itoa(specs.VersionMajor)
	typeurl.Register(&specs.Spec{}, prefix, "opencontainers/runtime-spec", major, "Spec")
	typeurl.Register(&specs.Process{}, prefix, "opencontainers/runtime-spec", major, "Process")
	typeurl.Register(&specs.LinuxResources{}, prefix, "opencontainers/runtime-spec", major, "LinuxResources")
	typeurl.Register(&specs.WindowsResources{}, prefix, "opencontainers/runtime-spec", major, "WindowsResources")
}

// New returns a new containerd client that is connected to the containerd
// instance provided by address
func New(address string, opts ...ClientOpt) (*Client, error) {
	var copts clientOpts
	for _, o := range opts {
		if err := o(&copts); err != nil {
			return nil, err
		}
	}
	gopts := []grpc.DialOption{
		grpc.WithBlock(),
		grpc.WithInsecure(),
		grpc.WithTimeout(60 * time.Second),
		grpc.FailOnNonTempDialError(true),
		grpc.WithBackoffMaxDelay(3 * time.Second),
		grpc.WithDialer(dialer.Dialer),
	}
	if len(copts.dialOptions) > 0 {
		gopts = copts.dialOptions
	}
	if copts.defaultns != "" {
		unary, stream := newNSInterceptors(copts.defaultns)
		gopts = append(gopts,
			grpc.WithUnaryInterceptor(unary),
			grpc.WithStreamInterceptor(stream),
		)
	}
	conn, err := grpc.Dial(dialer.DialAddress(address), gopts...)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to dial %q", address)
	}
	return NewWithConn(conn, opts...)
}

// NewWithConn returns a new containerd client that is connected to the containerd
// instance provided by the connection
func NewWithConn(conn *grpc.ClientConn, opts ...ClientOpt) (*Client, error) {
	return &Client{
|
||||
conn: conn,
|
||||
runtime: fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtime.GOOS),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Client is the client to interact with containerd and its various services
|
||||
// using a uniform interface
|
||||
type Client struct {
|
||||
conn *grpc.ClientConn
|
||||
runtime string
|
||||
}
|
||||
|
||||
// IsServing returns true if the client can successfully connect to the
|
||||
// containerd daemon and the healthcheck service returns the SERVING
|
||||
// response.
|
||||
// This call will block if a transient error is encountered during
|
||||
// connection. A timeout can be set in the context to ensure it returns
|
||||
// early.
|
||||
func (c *Client) IsServing(ctx context.Context) (bool, error) {
|
||||
r, err := c.HealthService().Check(ctx, &grpc_health_v1.HealthCheckRequest{}, grpc.FailFast(false))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return r.Status == grpc_health_v1.HealthCheckResponse_SERVING, nil
|
||||
}
|
||||
|
||||
// Containers returns all containers created in containerd
|
||||
func (c *Client) Containers(ctx context.Context, filters ...string) ([]Container, error) {
|
||||
r, err := c.ContainerService().List(ctx, filters...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var out []Container
|
||||
for _, container := range r {
|
||||
out = append(out, containerFromRecord(c, container))
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// NewContainer will create a new container in container with the provided id
|
||||
// the id must be unique within the namespace
|
||||
func (c *Client) NewContainer(ctx context.Context, id string, opts ...NewContainerOpts) (Container, error) {
|
||||
ctx, done, err := c.WithLease(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer done()
|
||||
|
||||
container := containers.Container{
|
||||
ID: id,
|
||||
Runtime: containers.RuntimeInfo{
|
||||
Name: c.runtime,
|
||||
},
|
||||
}
|
||||
for _, o := range opts {
|
||||
if err := o(ctx, c, &container); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
r, err := c.ContainerService().Create(ctx, container)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return containerFromRecord(c, r), nil
|
||||
}
|
||||
|
||||
// LoadContainer loads an existing container from metadata
|
||||
func (c *Client) LoadContainer(ctx context.Context, id string) (Container, error) {
|
||||
r, err := c.ContainerService().Get(ctx, id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return containerFromRecord(c, r), nil
|
||||
}
|
||||
|
||||
// RemoteContext is used to configure object resolutions and transfers with
|
||||
// remote content stores and image providers.
|
||||
type RemoteContext struct {
|
||||
// Resolver is used to resolve names to objects, fetchers, and pushers.
|
||||
// If no resolver is provided, defaults to Docker registry resolver.
|
||||
Resolver remotes.Resolver
|
||||
|
||||
// Unpack is done after an image is pulled to extract into a snapshotter.
|
||||
// If an image is not unpacked on pull, it can be unpacked any time
|
||||
// afterwards. Unpacking is required to run an image.
|
||||
Unpack bool
|
||||
|
||||
// Snapshotter used for unpacking
|
||||
Snapshotter string
|
||||
|
||||
// Labels to be applied to the created image
|
||||
Labels map[string]string
|
||||
|
||||
// BaseHandlers are a set of handlers which get are called on dispatch.
|
||||
// These handlers always get called before any operation specific
|
||||
// handlers.
|
||||
BaseHandlers []images.Handler
|
||||
|
||||
// ConvertSchema1 is whether to convert Docker registry schema 1
|
||||
// manifests. If this option is false then any image which resolves
|
||||
// to schema 1 will return an error since schema 1 is not supported.
|
||||
ConvertSchema1 bool
|
||||
}
|
||||
|
||||
func defaultRemoteContext() *RemoteContext {
|
||||
return &RemoteContext{
|
||||
Resolver: docker.NewResolver(docker.ResolverOptions{
|
||||
Client: http.DefaultClient,
|
||||
}),
|
||||
Snapshotter: DefaultSnapshotter,
|
||||
}
|
||||
}
|
||||
|
||||
// Pull downloads the provided content into containerd's content store
|
||||
func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (Image, error) {
|
||||
pullCtx := defaultRemoteContext()
|
||||
for _, o := range opts {
|
||||
if err := o(c, pullCtx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
store := c.ContentStore()
|
||||
|
||||
ctx, done, err := c.WithLease(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer done()
|
||||
|
||||
name, desc, err := pullCtx.Resolver.Resolve(ctx, ref)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fetcher, err := pullCtx.Resolver.Fetcher(ctx, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var (
|
||||
schema1Converter *schema1.Converter
|
||||
handler images.Handler
|
||||
)
|
||||
if desc.MediaType == images.MediaTypeDockerSchema1Manifest && pullCtx.ConvertSchema1 {
|
||||
schema1Converter = schema1.NewConverter(store, fetcher)
|
||||
handler = images.Handlers(append(pullCtx.BaseHandlers, schema1Converter)...)
|
||||
} else {
|
||||
handler = images.Handlers(append(pullCtx.BaseHandlers,
|
||||
remotes.FetchHandler(store, fetcher),
|
||||
images.ChildrenHandler(store, platforms.Default()))...,
|
||||
)
|
||||
}
|
||||
|
||||
if err := images.Dispatch(ctx, handler, desc); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if schema1Converter != nil {
|
||||
desc, err = schema1Converter.Convert(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
imgrec := images.Image{
|
||||
Name: name,
|
||||
Target: desc,
|
||||
Labels: pullCtx.Labels,
|
||||
}
|
||||
|
||||
is := c.ImageService()
|
||||
if created, err := is.Create(ctx, imgrec); err != nil {
|
||||
if !errdefs.IsAlreadyExists(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
updated, err := is.Update(ctx, imgrec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
imgrec = updated
|
||||
} else {
|
||||
imgrec = created
|
||||
}
|
||||
|
||||
img := &image{
|
||||
client: c,
|
||||
i: imgrec,
|
||||
}
|
||||
if pullCtx.Unpack {
|
||||
if err := img.Unpack(ctx, pullCtx.Snapshotter); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return img, nil
|
||||
}
|
||||
|
||||
// Push uploads the provided content to a remote resource
|
||||
func (c *Client) Push(ctx context.Context, ref string, desc ocispec.Descriptor, opts ...RemoteOpt) error {
|
||||
pushCtx := defaultRemoteContext()
|
||||
for _, o := range opts {
|
||||
if err := o(c, pushCtx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
pusher, err := pushCtx.Resolver.Pusher(ctx, ref)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var m sync.Mutex
|
||||
manifestStack := []ocispec.Descriptor{}
|
||||
|
||||
filterHandler := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
|
||||
switch desc.MediaType {
|
||||
case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest,
|
||||
images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
|
||||
m.Lock()
|
||||
manifestStack = append(manifestStack, desc)
|
||||
m.Unlock()
|
||||
return nil, images.ErrStopHandler
|
||||
default:
|
||||
return nil, nil
|
||||
}
|
||||
})
|
||||
|
||||
cs := c.ContentStore()
|
||||
pushHandler := remotes.PushHandler(cs, pusher)
|
||||
|
||||
handlers := append(pushCtx.BaseHandlers,
|
||||
images.ChildrenHandler(cs, platforms.Default()),
|
||||
filterHandler,
|
||||
pushHandler,
|
||||
)
|
||||
|
||||
if err := images.Dispatch(ctx, images.Handlers(handlers...), desc); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Iterate in reverse order as seen, parent always uploaded after child
|
||||
for i := len(manifestStack) - 1; i >= 0; i-- {
|
||||
_, err := pushHandler(ctx, manifestStack[i])
|
||||
if err != nil {
|
||||
// TODO(estesp): until we have a more complete method for index push, we need to report
|
||||
// missing dependencies in an index/manifest list by sensing the "400 Bad Request"
|
||||
// as a marker for this problem
|
||||
if (manifestStack[i].MediaType == ocispec.MediaTypeImageIndex ||
|
||||
manifestStack[i].MediaType == images.MediaTypeDockerSchema2ManifestList) &&
|
||||
errors.Cause(err) != nil && strings.Contains(errors.Cause(err).Error(), "400 Bad Request") {
|
||||
return errors.Wrap(err, "manifest list/index references to blobs and/or manifests are missing in your target registry")
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetImage returns an existing image
|
||||
func (c *Client) GetImage(ctx context.Context, ref string) (Image, error) {
|
||||
i, err := c.ImageService().Get(ctx, ref)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &image{
|
||||
client: c,
|
||||
i: i,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ListImages returns all existing images
|
||||
func (c *Client) ListImages(ctx context.Context, filters ...string) ([]Image, error) {
|
||||
imgs, err := c.ImageService().List(ctx, filters...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
images := make([]Image, len(imgs))
|
||||
for i, img := range imgs {
|
||||
images[i] = &image{
|
||||
client: c,
|
||||
i: img,
|
||||
}
|
||||
}
|
||||
return images, nil
|
||||
}
|
||||
|
||||
// Subscribe to events that match one or more of the provided filters.
|
||||
//
|
||||
// Callers should listen on both the envelope channel and errs channel. If the
|
||||
// errs channel returns nil or an error, the subscriber should terminate.
|
||||
//
|
||||
// To cancel shutdown reciept of events, cancel the provided context. The errs
|
||||
// channel will be closed and return a nil error.
|
||||
func (c *Client) Subscribe(ctx context.Context, filters ...string) (ch <-chan *eventsapi.Envelope, errs <-chan error) {
|
||||
var (
|
||||
evq = make(chan *eventsapi.Envelope)
|
||||
errq = make(chan error, 1)
|
||||
)
|
||||
|
||||
errs = errq
|
||||
ch = evq
|
||||
|
||||
session, err := c.EventService().Subscribe(ctx, &eventsapi.SubscribeRequest{
|
||||
Filters: filters,
|
||||
})
|
||||
if err != nil {
|
||||
errq <- err
|
||||
close(errq)
|
||||
return
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer close(errq)
|
||||
|
||||
for {
|
||||
ev, err := session.Recv()
|
||||
if err != nil {
|
||||
errq <- err
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case evq <- ev:
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return ch, errs
|
||||
}
|
||||
|
||||
// Close closes the clients connection to containerd
|
||||
func (c *Client) Close() error {
|
||||
return c.conn.Close()
|
||||
}
|
||||
|
||||
// NamespaceService returns the underlying Namespaces Store
|
||||
func (c *Client) NamespaceService() namespaces.Store {
|
||||
return NewNamespaceStoreFromClient(namespacesapi.NewNamespacesClient(c.conn))
|
||||
}
|
||||
|
||||
// ContainerService returns the underlying container Store
|
||||
func (c *Client) ContainerService() containers.Store {
|
||||
return NewRemoteContainerStore(containersapi.NewContainersClient(c.conn))
|
||||
}
|
||||
|
||||
// ContentStore returns the underlying content Store
|
||||
func (c *Client) ContentStore() content.Store {
|
||||
return NewContentStoreFromClient(contentapi.NewContentClient(c.conn))
|
||||
}
|
||||
|
||||
// SnapshotService returns the underlying snapshotter for the provided snapshotter name
|
||||
func (c *Client) SnapshotService(snapshotterName string) snapshots.Snapshotter {
|
||||
return NewSnapshotterFromClient(snapshotsapi.NewSnapshotsClient(c.conn), snapshotterName)
|
||||
}
|
||||
|
||||
// TaskService returns the underlying TasksClient
|
||||
func (c *Client) TaskService() tasks.TasksClient {
|
||||
return tasks.NewTasksClient(c.conn)
|
||||
}
|
||||
|
||||
// ImageService returns the underlying image Store
|
||||
func (c *Client) ImageService() images.Store {
|
||||
return NewImageStoreFromClient(imagesapi.NewImagesClient(c.conn))
|
||||
}
|
||||
|
||||
// DiffService returns the underlying Differ
|
||||
func (c *Client) DiffService() diff.Differ {
|
||||
return NewDiffServiceFromClient(diffapi.NewDiffClient(c.conn))
|
||||
}
|
||||
|
||||
// IntrospectionService returns the underlying Introspection Client
|
||||
func (c *Client) IntrospectionService() introspectionapi.IntrospectionClient {
|
||||
return introspectionapi.NewIntrospectionClient(c.conn)
|
||||
}
|
||||
|
||||
// HealthService returns the underlying GRPC HealthClient
|
||||
func (c *Client) HealthService() grpc_health_v1.HealthClient {
|
||||
return grpc_health_v1.NewHealthClient(c.conn)
|
||||
}
|
||||
|
||||
// EventService returns the underlying EventsClient
|
||||
func (c *Client) EventService() eventsapi.EventsClient {
|
||||
return eventsapi.NewEventsClient(c.conn)
|
||||
}
|
||||
|
||||
// VersionService returns the underlying VersionClient
|
||||
func (c *Client) VersionService() versionservice.VersionClient {
|
||||
return versionservice.NewVersionClient(c.conn)
|
||||
}
|
||||
|
||||
// Version of containerd
|
||||
type Version struct {
|
||||
// Version number
|
||||
Version string
|
||||
// Revision from git that was built
|
||||
Revision string
|
||||
}
|
||||
|
||||
// Version returns the version of containerd that the client is connected to
|
||||
func (c *Client) Version(ctx context.Context) (Version, error) {
|
||||
response, err := c.VersionService().Version(ctx, &ptypes.Empty{})
|
||||
if err != nil {
|
||||
return Version{}, err
|
||||
}
|
||||
return Version{
|
||||
Version: response.Version,
|
||||
Revision: response.Revision,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type importOpts struct {
|
||||
}
|
||||
|
||||
// ImportOpt allows the caller to specify import specific options
|
||||
type ImportOpt func(c *importOpts) error
|
||||
|
||||
func resolveImportOpt(opts ...ImportOpt) (importOpts, error) {
|
||||
var iopts importOpts
|
||||
for _, o := range opts {
|
||||
if err := o(&iopts); err != nil {
|
||||
return iopts, err
|
||||
}
|
||||
}
|
||||
return iopts, nil
|
||||
}
|
||||
|
||||
// Import imports an image from a Tar stream using reader.
|
||||
// Caller needs to specify importer. Future version may use oci.v1 as the default.
|
||||
// Note that unreferrenced blobs may be imported to the content store as well.
|
||||
func (c *Client) Import(ctx context.Context, importer images.Importer, reader io.Reader, opts ...ImportOpt) ([]Image, error) {
|
||||
_, err := resolveImportOpt(opts...) // unused now
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ctx, done, err := c.WithLease(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer done()
|
||||
|
||||
imgrecs, err := importer.Import(ctx, c.ContentStore(), reader)
|
||||
if err != nil {
|
||||
// is.Update() is not called on error
|
||||
return nil, err
|
||||
}
|
||||
|
||||
is := c.ImageService()
|
||||
var images []Image
|
||||
for _, imgrec := range imgrecs {
|
||||
if updated, err := is.Update(ctx, imgrec, "target"); err != nil {
|
||||
if !errdefs.IsNotFound(err) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
created, err := is.Create(ctx, imgrec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
imgrec = created
|
||||
} else {
|
||||
imgrec = updated
|
||||
}
|
||||
|
||||
images = append(images, &image{
|
||||
client: c,
|
||||
i: imgrec,
|
||||
})
|
||||
}
|
||||
return images, nil
|
||||
}
|
||||
|
||||
type exportOpts struct {
|
||||
}
|
||||
|
||||
// ExportOpt allows the caller to specify export-specific options
|
||||
type ExportOpt func(c *exportOpts) error
|
||||
|
||||
func resolveExportOpt(opts ...ExportOpt) (exportOpts, error) {
|
||||
var eopts exportOpts
|
||||
for _, o := range opts {
|
||||
if err := o(&eopts); err != nil {
|
||||
return eopts, err
|
||||
}
|
||||
}
|
||||
return eopts, nil
|
||||
}
|
||||
|
||||
// Export exports an image to a Tar stream.
|
||||
// OCI format is used by default.
|
||||
// It is up to caller to put "org.opencontainers.image.ref.name" annotation to desc.
|
||||
// TODO(AkihiroSuda): support exporting multiple descriptors at once to a single archive stream.
|
||||
func (c *Client) Export(ctx context.Context, exporter images.Exporter, desc ocispec.Descriptor, opts ...ExportOpt) (io.ReadCloser, error) {
|
||||
_, err := resolveExportOpt(opts...) // unused now
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pr, pw := io.Pipe()
|
||||
go func() {
|
||||
pw.CloseWithError(exporter.Export(ctx, c.ContentStore(), desc, pw))
|
||||
}()
|
||||
return pr, nil
|
||||
}
|
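For orientation, a minimal sketch of how the client above is typically driven end to end: dial the daemon, set a namespace on the context, and pull an image with unpacking enabled. The socket path and image reference are assumptions for illustration, not values defined in this package:

package main

import (
    "context"
    "log"

    "github.com/containerd/containerd"
    "github.com/containerd/containerd/namespaces"
)

func main() {
    // assumed default socket path; adjust for your installation
    client, err := containerd.New("/run/containerd/containerd.sock")
    if err != nil {
        log.Fatal(err)
    }
    defer client.Close()

    // every operation needs a namespace on the context
    ctx := namespaces.WithNamespace(context.Background(), "example")

    // Pull resolves the ref, fetches content, and (with WithPullUnpack)
    // unpacks it into the default snapshotter so it can be run
    image, err := client.Pull(ctx, "docker.io/library/alpine:latest", containerd.WithPullUnpack)
    if err != nil {
        log.Fatal(err)
    }
    log.Println("pulled", image.Name())
}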
103
vendor/github.com/containerd/containerd/client_opts.go
generated
vendored
Normal file
@@ -0,0 +1,103 @@
package containerd

import (
    "github.com/containerd/containerd/images"
    "github.com/containerd/containerd/remotes"
    "google.golang.org/grpc"
)

type clientOpts struct {
    defaultns   string
    dialOptions []grpc.DialOption
}

// ClientOpt allows callers to set options on the containerd client
type ClientOpt func(c *clientOpts) error

// WithDefaultNamespace sets the default namespace on the client
//
// Any operation that does not have a namespace set on the context will
// be provided the default namespace
func WithDefaultNamespace(ns string) ClientOpt {
    return func(c *clientOpts) error {
        c.defaultns = ns
        return nil
    }
}

// WithDialOpts allows grpc.DialOptions to be set on the connection
func WithDialOpts(opts []grpc.DialOption) ClientOpt {
    return func(c *clientOpts) error {
        c.dialOptions = opts
        return nil
    }
}

// RemoteOpt allows the caller to set distribution options for a remote
type RemoteOpt func(*Client, *RemoteContext) error

// WithPullUnpack is used to unpack an image after pull. This
// uses the snapshotter, content store, and diff service
// configured for the client.
func WithPullUnpack(_ *Client, c *RemoteContext) error {
    c.Unpack = true
    return nil
}

// WithPullSnapshotter specifies snapshotter name used for unpacking
func WithPullSnapshotter(snapshotterName string) RemoteOpt {
    return func(_ *Client, c *RemoteContext) error {
        c.Snapshotter = snapshotterName
        return nil
    }
}

// WithPullLabel sets a label to be associated with a pulled reference
func WithPullLabel(key, value string) RemoteOpt {
    return func(_ *Client, rc *RemoteContext) error {
        if rc.Labels == nil {
            rc.Labels = make(map[string]string)
        }

        rc.Labels[key] = value
        return nil
    }
}

// WithPullLabels associates a set of labels to a pulled reference
func WithPullLabels(labels map[string]string) RemoteOpt {
    return func(_ *Client, rc *RemoteContext) error {
        if rc.Labels == nil {
            rc.Labels = make(map[string]string)
        }

        for k, v := range labels {
            rc.Labels[k] = v
        }
        return nil
    }
}

// WithSchema1Conversion is used to convert Docker registry schema 1
// manifests to oci manifests on pull. Without this option schema 1
// manifests will return a not supported error.
func WithSchema1Conversion(client *Client, c *RemoteContext) error {
    c.ConvertSchema1 = true
    return nil
}

// WithResolver specifies the resolver to use.
func WithResolver(resolver remotes.Resolver) RemoteOpt {
    return func(client *Client, c *RemoteContext) error {
        c.Resolver = resolver
        return nil
    }
}

// WithImageHandler adds a base handler to be called on dispatch.
func WithImageHandler(h images.Handler) RemoteOpt {
    return func(client *Client, c *RemoteContext) error {
        c.BaseHandlers = append(c.BaseHandlers, h)
        return nil
    }
}
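ClientOpt and RemoteOpt both follow the functional-options pattern: an option is just a function that mutates a configuration struct and may fail, and a constructor folds a variadic list of options over a default configuration. A self-contained sketch of the pattern, with illustrative names that are not containerd APIs:

package opts

import "errors"

type config struct {
    namespace string
}

// Opt mirrors the shape of ClientOpt/RemoteOpt above.
type Opt func(*config) error

// WithNamespace is an illustrative option, not a containerd API.
func WithNamespace(ns string) Opt {
    return func(c *config) error {
        if ns == "" {
            return errors.New("namespace must not be empty")
        }
        c.namespace = ns
        return nil
    }
}

// newConfig folds options over defaults, just as New does with ClientOpt.
func newConfig(opts ...Opt) (*config, error) {
    c := &config{namespace: "default"}
    for _, o := range opts {
        if err := o(c); err != nil {
            return nil, err
        }
    }
    return c, nil
}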
190
vendor/github.com/containerd/containerd/client_test.go
generated
vendored
Normal file
@@ -0,0 +1,190 @@
package containerd

import (
    "bytes"
    "context"
    "flag"
    "fmt"
    "io/ioutil"
    golog "log"
    "os"
    "os/exec"
    "runtime"
    "testing"
    "time"

    "google.golang.org/grpc/grpclog"

    "github.com/containerd/containerd/log"
    "github.com/containerd/containerd/namespaces"
    "github.com/containerd/containerd/testutil"
    "github.com/sirupsen/logrus"
)

var (
    address      string
    noDaemon     bool
    noCriu       bool
    supportsCriu bool

    ctrd = &daemon{}
)

func init() {
    // Discard grpc logs so that they don't mess with our stdio
    grpclog.SetLogger(golog.New(ioutil.Discard, "", golog.LstdFlags))

    flag.StringVar(&address, "address", defaultAddress, "The address to the containerd socket for use in the tests")
    flag.BoolVar(&noDaemon, "no-daemon", false, "Do not start a dedicated daemon for the tests")
    flag.BoolVar(&noCriu, "no-criu", false, "Do not run the checkpoint tests")
    flag.Parse()
}

func testContext() (context.Context, context.CancelFunc) {
    ctx, cancel := context.WithCancel(context.Background())
    ctx = namespaces.WithNamespace(ctx, "testing")
    return ctx, cancel
}

func TestMain(m *testing.M) {
    if testing.Short() {
        os.Exit(m.Run())
    }
    testutil.RequiresRootM()
    // check if criu is installed on the system
    _, err := exec.LookPath("criu")
    supportsCriu = err == nil && !noCriu

    var (
        buf         = bytes.NewBuffer(nil)
        ctx, cancel = testContext()
    )
    defer cancel()

    if !noDaemon {
        os.RemoveAll(defaultRoot)

        err := ctrd.start("containerd", address, []string{
            "--root", defaultRoot,
            "--state", defaultState,
            "--log-level", "debug",
        }, buf, buf)
        if err != nil {
            fmt.Fprintf(os.Stderr, "%s: %s", err, buf.String())
            os.Exit(1)
        }
    }

    waitCtx, waitCancel := context.WithTimeout(ctx, 2*time.Second)
    client, err := ctrd.waitForStart(waitCtx)
    waitCancel()
    if err != nil {
        ctrd.Kill()
        ctrd.Wait()
        fmt.Fprintf(os.Stderr, "%s: %s\n", err, buf.String())
        os.Exit(1)
    }

    // print out the version information
    version, err := client.Version(ctx)
    if err != nil {
        fmt.Fprintf(os.Stderr, "error getting version: %s\n", err)
        os.Exit(1)
    }

    // allow comparison with containerd under test
    log.G(ctx).WithFields(logrus.Fields{
        "version":  version.Version,
        "revision": version.Revision,
    }).Info("running tests against containerd")

    // pull a seed image
    if runtime.GOOS != "windows" { // TODO: remove once pull is supported on windows
        if _, err = client.Pull(ctx, testImage, WithPullUnpack); err != nil {
            ctrd.Stop()
            ctrd.Wait()
            fmt.Fprintf(os.Stderr, "%s: %s\n", err, buf.String())
            os.Exit(1)
        }
    }

    if err := platformTestSetup(client); err != nil {
        fmt.Fprintln(os.Stderr, "platform test setup failed", err)
        os.Exit(1)
    }

    if err := client.Close(); err != nil {
        fmt.Fprintln(os.Stderr, "failed to close client", err)
    }

    // run the tests
    status := m.Run()

    if !noDaemon {
        // tear down the daemon and resources created
        if err := ctrd.Stop(); err != nil {
            if err := ctrd.Kill(); err != nil {
                fmt.Fprintln(os.Stderr, "failed to signal containerd", err)
            }
        }
        if err := ctrd.Wait(); err != nil {
            if _, ok := err.(*exec.ExitError); !ok {
                fmt.Fprintln(os.Stderr, "failed to wait for containerd", err)
            }
        }
        if err := os.RemoveAll(defaultRoot); err != nil {
            fmt.Fprintln(os.Stderr, "failed to remove test root dir", err)
            os.Exit(1)
        }
        // only print containerd logs if the test failed
        if status != 0 {
            fmt.Fprintln(os.Stderr, buf.String())
        }
    }
    os.Exit(status)
}

func newClient(t testing.TB, address string, opts ...ClientOpt) (*Client, error) {
    if testing.Short() {
        t.Skip()
    }
    // testutil.RequiresRoot(t) is not needed here (already called in TestMain)
    return New(address, opts...)
}

func TestNewClient(t *testing.T) {
    t.Parallel()

    client, err := newClient(t, address)
    if err != nil {
        t.Fatal(err)
    }
    if client == nil {
        t.Fatal("New() returned nil client")
    }
    if err := client.Close(); err != nil {
        t.Errorf("client close returned error %v", err)
    }
}

// All the container tests depend on this, so we need it to run first.
func TestImagePull(t *testing.T) {
    if runtime.GOOS == "windows" {
        // TODO: remove once Windows has a snapshotter
        t.Skip("Windows does not have a snapshotter yet")
    }

    client, err := newClient(t, address)
    if err != nil {
        t.Fatal(err)
    }
    defer client.Close()

    ctx, cancel := testContext()
    defer cancel()
    _, err = client.Pull(ctx, testImage)
    if err != nil {
        t.Error(err)
        return
    }
}
38
vendor/github.com/containerd/containerd/client_unix_test.go
generated
vendored
Normal file
@@ -0,0 +1,38 @@
// +build !windows

package containerd

import (
    "runtime"
)

const (
    defaultRoot    = "/var/lib/containerd-test"
    defaultState   = "/run/containerd-test"
    defaultAddress = "/run/containerd-test/containerd.sock"
)

var (
    testImage string
)

func platformTestSetup(client *Client) error {
    return nil
}

func init() {
    switch runtime.GOARCH {
    case "386":
        testImage = "docker.io/i386/alpine:latest"
    case "arm":
        testImage = "docker.io/arm32v6/alpine:latest"
    case "arm64":
        testImage = "docker.io/arm64v8/alpine:latest"
    case "ppc64le":
        testImage = "docker.io/ppc64le/alpine:latest"
    case "s390x":
        testImage = "docker.io/s390x/alpine:latest"
    default:
        testImage = "docker.io/library/alpine:latest"
    }
}
88
vendor/github.com/containerd/containerd/client_windows_test.go
generated
vendored
Normal file
@@ -0,0 +1,88 @@
package containerd

import (
    "encoding/json"
    "fmt"
    "os"
    "path/filepath"

    "github.com/pkg/errors"
)

const (
    defaultAddress = `\\.\pipe\containerd-containerd-test`
    testImage      = "docker.io/library/go:nanoserver"
)

var (
    dockerLayerFolders []string

    defaultRoot  = filepath.Join(os.Getenv("programfiles"), "containerd", "root-test")
    defaultState = filepath.Join(os.Getenv("programfiles"), "containerd", "state-test")
)

func platformTestSetup(client *Client) error {
    var (
        roots       []string
        layerChains = make(map[string]string)
    )
    // Since we can't pull images yet, we'll piggyback on the default
    // docker images
    wfPath := `C:\ProgramData\docker\windowsfilter`
    wf, err := os.Open(wfPath)
    if err != nil {
        return errors.Wrapf(err, "failed to access docker layers @ %s", wfPath)
    }
    defer wf.Close()
    entries, err := wf.Readdirnames(0)
    if err != nil {
        return errors.Wrapf(err, "failed to read %s entries", wfPath)
    }

    for _, fn := range entries {
        layerChainPath := filepath.Join(wfPath, fn, "layerchain.json")
        lfi, err := os.Stat(layerChainPath)
        switch {
        case err == nil && lfi.Mode().IsRegular():
            f, err := os.OpenFile(layerChainPath, os.O_RDONLY, 0660)
            if err != nil {
                fmt.Fprintln(os.Stderr,
                    errors.Wrapf(err, "failed to open %s", layerChainPath))
                continue
            }
            defer f.Close()
            l := make([]string, 0)
            if err := json.NewDecoder(f).Decode(&l); err != nil {
                fmt.Fprintln(os.Stderr,
                    errors.Wrapf(err, "failed to decode %s", layerChainPath))
                continue
            }
            switch {
            case len(l) == 1:
                layerChains[l[0]] = filepath.Join(wfPath, fn)
            case len(l) > 1:
                fmt.Fprintf(os.Stderr, "Too many entries in %s: %d", layerChainPath, len(l))
            case len(l) == 0:
                roots = append(roots, filepath.Join(wfPath, fn))
            }
        case os.IsNotExist(err):
            // keep on going
        default:
            return errors.Wrapf(err, "error trying to access %s", layerChainPath)
        }
    }

    // There will be 2 roots; just take the first one
    l := roots[0]
    dockerLayerFolders = append(dockerLayerFolders, l)
    for {
        l = layerChains[l]
        if l == "" {
            break
        }

        dockerLayerFolders = append([]string{l}, dockerLayerFolders...)
    }

    return nil
}
3
vendor/github.com/containerd/containerd/code-of-conduct.md
generated
vendored
Normal file
@@ -0,0 +1,3 @@
## containerd Community Code of Conduct

containerd follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
316
vendor/github.com/containerd/containerd/container.go
generated
vendored
Normal file
@@ -0,0 +1,316 @@
package containerd

import (
    "context"
    "encoding/json"
    "path/filepath"
    "strings"

    "github.com/containerd/containerd/api/services/tasks/v1"
    "github.com/containerd/containerd/api/types"
    "github.com/containerd/containerd/cio"
    "github.com/containerd/containerd/containers"
    "github.com/containerd/containerd/errdefs"
    "github.com/containerd/typeurl"
    prototypes "github.com/gogo/protobuf/types"
    specs "github.com/opencontainers/runtime-spec/specs-go"
    "github.com/pkg/errors"
)

// Container is a metadata object for container resources and task creation
type Container interface {
    // ID identifies the container
    ID() string
    // Info returns the underlying container record type
    Info(context.Context) (containers.Container, error)
    // Delete removes the container
    Delete(context.Context, ...DeleteOpts) error
    // NewTask creates a new task based on the container metadata
    NewTask(context.Context, cio.Creation, ...NewTaskOpts) (Task, error)
    // Spec returns the OCI runtime specification
    Spec(context.Context) (*specs.Spec, error)
    // Task returns the current task for the container
    //
    // If cio.Attach options are passed the client will reattach to the IO for the running
    // task. If no task exists for the container a NotFound error is returned
    //
    // Clients must make sure that only one reader is attached to the task and consuming
    // the output from the task's fifos
    Task(context.Context, cio.Attach) (Task, error)
    // Image returns the image that the container is based on
    Image(context.Context) (Image, error)
    // Labels returns the labels set on the container
    Labels(context.Context) (map[string]string, error)
    // SetLabels sets the provided labels for the container and returns the final label set
    SetLabels(context.Context, map[string]string) (map[string]string, error)
    // Extensions returns the extensions set on the container
    Extensions(context.Context) (map[string]prototypes.Any, error)
    // Update a container
    Update(context.Context, ...UpdateContainerOpts) error
}

func containerFromRecord(client *Client, c containers.Container) *container {
    return &container{
        client: client,
        id:     c.ID,
    }
}

var _ = (Container)(&container{})

type container struct {
    client *Client
    id     string
}

// ID returns the container's unique id
func (c *container) ID() string {
    return c.id
}

func (c *container) Info(ctx context.Context) (containers.Container, error) {
    return c.get(ctx)
}

func (c *container) Extensions(ctx context.Context) (map[string]prototypes.Any, error) {
    r, err := c.get(ctx)
    if err != nil {
        return nil, err
    }
    return r.Extensions, nil
}

func (c *container) Labels(ctx context.Context) (map[string]string, error) {
    r, err := c.get(ctx)
    if err != nil {
        return nil, err
    }
    return r.Labels, nil
}

func (c *container) SetLabels(ctx context.Context, labels map[string]string) (map[string]string, error) {
    container := containers.Container{
        ID:     c.id,
        Labels: labels,
    }

    var paths []string
    // mask off paths so we only update the labels passed in;
    // labels not in the argument will be left alone.
    for k := range labels {
        paths = append(paths, strings.Join([]string{"labels", k}, "."))
    }

    r, err := c.client.ContainerService().Update(ctx, container, paths...)
    if err != nil {
        return nil, err
    }
    return r.Labels, nil
}

// Spec returns the current OCI specification for the container
func (c *container) Spec(ctx context.Context) (*specs.Spec, error) {
    r, err := c.get(ctx)
    if err != nil {
        return nil, err
    }
    var s specs.Spec
    if err := json.Unmarshal(r.Spec.Value, &s); err != nil {
        return nil, err
    }
    return &s, nil
}

// Delete deletes an existing container.
// An error is returned if the container has running tasks.
func (c *container) Delete(ctx context.Context, opts ...DeleteOpts) error {
    if _, err := c.loadTask(ctx, nil); err == nil {
        return errors.Wrapf(errdefs.ErrFailedPrecondition, "cannot delete running task %v", c.id)
    }
    r, err := c.get(ctx)
    if err != nil {
        return err
    }
    for _, o := range opts {
        if err := o(ctx, c.client, r); err != nil {
            return err
        }
    }
    return c.client.ContainerService().Delete(ctx, c.id)
}

func (c *container) Task(ctx context.Context, attach cio.Attach) (Task, error) {
    return c.loadTask(ctx, attach)
}

// Image returns the image that the container is based on
func (c *container) Image(ctx context.Context) (Image, error) {
    r, err := c.get(ctx)
    if err != nil {
        return nil, err
    }
    if r.Image == "" {
        return nil, errors.Wrap(errdefs.ErrNotFound, "container not created from an image")
    }
    i, err := c.client.ImageService().Get(ctx, r.Image)
    if err != nil {
        return nil, errors.Wrapf(err, "failed to get image %s for container", r.Image)
    }
    return &image{
        client: c.client,
        i:      i,
    }, nil
}

func (c *container) NewTask(ctx context.Context, ioCreate cio.Creation, opts ...NewTaskOpts) (_ Task, err error) {
    i, err := ioCreate(c.id)
    if err != nil {
        return nil, err
    }
    defer func() {
        if err != nil && i != nil {
            i.Cancel()
            i.Close()
        }
    }()
    cfg := i.Config()
    request := &tasks.CreateTaskRequest{
        ContainerID: c.id,
        Terminal:    cfg.Terminal,
        Stdin:       cfg.Stdin,
        Stdout:      cfg.Stdout,
        Stderr:      cfg.Stderr,
    }
    r, err := c.get(ctx)
    if err != nil {
        return nil, err
    }
    if r.SnapshotKey != "" {
        if r.Snapshotter == "" {
            return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "unable to resolve rootfs mounts without snapshotter on container")
        }

        // get the rootfs from the snapshotter and add it to the request
        mounts, err := c.client.SnapshotService(r.Snapshotter).Mounts(ctx, r.SnapshotKey)
        if err != nil {
            return nil, err
        }
        for _, m := range mounts {
            request.Rootfs = append(request.Rootfs, &types.Mount{
                Type:    m.Type,
                Source:  m.Source,
                Options: m.Options,
            })
        }
    }
    var info TaskInfo
    for _, o := range opts {
        if err := o(ctx, c.client, &info); err != nil {
            return nil, err
        }
    }
    if info.RootFS != nil {
        for _, m := range info.RootFS {
            request.Rootfs = append(request.Rootfs, &types.Mount{
                Type:    m.Type,
                Source:  m.Source,
                Options: m.Options,
            })
        }
    }
    if info.Options != nil {
        any, err := typeurl.MarshalAny(info.Options)
        if err != nil {
            return nil, err
        }
        request.Options = any
    }
    t := &task{
        client: c.client,
        io:     i,
        id:     c.id,
    }
    if info.Checkpoint != nil {
        request.Checkpoint = info.Checkpoint
    }
    response, err := c.client.TaskService().Create(ctx, request)
    if err != nil {
        return nil, errdefs.FromGRPC(err)
    }
    t.pid = response.Pid
    return t, nil
}

func (c *container) Update(ctx context.Context, opts ...UpdateContainerOpts) error {
    // fetch the current container config before updating it
    r, err := c.get(ctx)
    if err != nil {
        return err
    }
    for _, o := range opts {
        if err := o(ctx, c.client, &r); err != nil {
            return err
        }
    }
    if _, err := c.client.ContainerService().Update(ctx, r); err != nil {
        return errdefs.FromGRPC(err)
    }
    return nil
}

func (c *container) loadTask(ctx context.Context, ioAttach cio.Attach) (Task, error) {
    response, err := c.client.TaskService().Get(ctx, &tasks.GetRequest{
        ContainerID: c.id,
    })
    if err != nil {
        err = errdefs.FromGRPC(err)
        if errdefs.IsNotFound(err) {
            return nil, errors.Wrapf(err, "no running task found")
        }
        return nil, err
    }
    var i cio.IO
    if ioAttach != nil {
        if i, err = attachExistingIO(response, ioAttach); err != nil {
            return nil, err
        }
    }
    t := &task{
        client: c.client,
        io:     i,
        id:     response.Process.ID,
        pid:    response.Process.Pid,
    }
    return t, nil
}

func (c *container) get(ctx context.Context) (containers.Container, error) {
    return c.client.ContainerService().Get(ctx, c.id)
}

func attachExistingIO(response *tasks.GetResponse, ioAttach cio.Attach) (cio.IO, error) {
    // get the existing fifo paths from the task information stored by the daemon
    paths := &cio.FIFOSet{
        Dir: getFifoDir([]string{
            response.Process.Stdin,
            response.Process.Stdout,
            response.Process.Stderr,
        }),
        In:       response.Process.Stdin,
        Out:      response.Process.Stdout,
        Err:      response.Process.Stderr,
        Terminal: response.Process.Terminal,
    }
    return ioAttach(paths)
}

// getFifoDir looks for any non-empty path for a stdio fifo
// and returns the dir where it is located
func getFifoDir(paths []string) string {
    for _, p := range paths {
        if p != "" {
            return filepath.Dir(p)
        }
    }
    return ""
}
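The Container interface above implies a task lifecycle: create a task (which resolves rootfs mounts from the snapshot), subscribe to its exit with Wait before starting it, then start, signal, and reap. A hedged sketch of that flow; cio.NullIO (an IO creator that discards output) and SIGTERM are illustrative choices, not requirements of this package:

package example

import (
    "context"
    "syscall"

    "github.com/containerd/containerd"
    "github.com/containerd/containerd/cio"
)

// runTask sketches the lifecycle implied by Container.NewTask above.
func runTask(ctx context.Context, container containerd.Container) error {
    task, err := container.NewTask(ctx, cio.NullIO) // assumed IO creator that discards output
    if err != nil {
        return err
    }
    defer task.Delete(ctx)

    statusC, err := task.Wait(ctx) // subscribe to the exit event before starting
    if err != nil {
        return err
    }
    if err := task.Start(ctx); err != nil {
        return err
    }
    if err := task.Kill(ctx, syscall.SIGTERM); err != nil {
        return err
    }
    <-statusC // block until the task has actually exited
    return nil
}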
252
vendor/github.com/containerd/containerd/container_checkpoint_test.go
generated
vendored
Normal file
@@ -0,0 +1,252 @@
// +build !windows

package containerd

import (
    "syscall"
    "testing"

    "github.com/containerd/containerd/oci"
)

func TestCheckpointRestore(t *testing.T) {
    if !supportsCriu {
        t.Skip("system does not have criu installed")
    }
    client, err := newClient(t, address)
    if err != nil {
        t.Fatal(err)
    }
    defer client.Close()

    var (
        ctx, cancel = testContext()
        id          = t.Name()
    )
    defer cancel()

    image, err := client.GetImage(ctx, testImage)
    if err != nil {
        t.Error(err)
        return
    }
    container, err := client.NewContainer(ctx, id, WithNewSpec(oci.WithImageConfig(image), oci.WithProcessArgs("sleep", "100")), WithNewSnapshot(id, image))
    if err != nil {
        t.Error(err)
        return
    }
    defer container.Delete(ctx, WithSnapshotCleanup)

    task, err := container.NewTask(ctx, empty())
    if err != nil {
        t.Error(err)
        return
    }
    defer task.Delete(ctx)

    statusC, err := task.Wait(ctx)
    if err != nil {
        t.Error(err)
        return
    }

    if err := task.Start(ctx); err != nil {
        t.Error(err)
        return
    }

    checkpoint, err := task.Checkpoint(ctx, WithExit)
    if err != nil {
        t.Error(err)
        return
    }

    <-statusC

    if _, err := task.Delete(ctx); err != nil {
        t.Error(err)
        return
    }
    if task, err = container.NewTask(ctx, empty(), WithTaskCheckpoint(checkpoint)); err != nil {
        t.Error(err)
        return
    }
    defer task.Delete(ctx)

    statusC, err = task.Wait(ctx)
    if err != nil {
        t.Error(err)
        return
    }

    if err := task.Start(ctx); err != nil {
        t.Error(err)
        return
    }

    if err := task.Kill(ctx, syscall.SIGKILL); err != nil {
        t.Error(err)
        return
    }
    <-statusC
}

func TestCheckpointRestoreNewContainer(t *testing.T) {
    if !supportsCriu {
        t.Skip("system does not have criu installed")
    }
    client, err := newClient(t, address)
    if err != nil {
        t.Fatal(err)
    }
    defer client.Close()

    id := t.Name()
    ctx, cancel := testContext()
    defer cancel()

    image, err := client.GetImage(ctx, testImage)
    if err != nil {
        t.Error(err)
        return
    }
    container, err := client.NewContainer(ctx, id, WithNewSpec(oci.WithImageConfig(image), oci.WithProcessArgs("sleep", "100")), WithNewSnapshot(id, image))
    if err != nil {
        t.Error(err)
        return
    }
    defer container.Delete(ctx, WithSnapshotCleanup)

    task, err := container.NewTask(ctx, empty())
    if err != nil {
        t.Error(err)
        return
    }
    defer task.Delete(ctx)

    statusC, err := task.Wait(ctx)
    if err != nil {
        t.Error(err)
        return
    }

    if err := task.Start(ctx); err != nil {
        t.Error(err)
        return
    }

    checkpoint, err := task.Checkpoint(ctx, WithExit)
    if err != nil {
        t.Error(err)
        return
    }

    <-statusC

    if _, err := task.Delete(ctx); err != nil {
        t.Error(err)
        return
    }
    if err := container.Delete(ctx, WithSnapshotCleanup); err != nil {
        t.Error(err)
        return
    }
    if container, err = client.NewContainer(ctx, id, WithCheckpoint(checkpoint, id)); err != nil {
        t.Error(err)
        return
    }
    if task, err = container.NewTask(ctx, empty(), WithTaskCheckpoint(checkpoint)); err != nil {
        t.Error(err)
        return
    }
    defer task.Delete(ctx)

    statusC, err = task.Wait(ctx)
    if err != nil {
        t.Error(err)
        return
    }

    if err := task.Start(ctx); err != nil {
        t.Error(err)
        return
    }

    if err := task.Kill(ctx, syscall.SIGKILL); err != nil {
        t.Error(err)
        return
    }
    <-statusC
}

func TestCheckpointLeaveRunning(t *testing.T) {
    if testing.Short() {
        t.Skip()
    }
    if !supportsCriu {
        t.Skip("system does not have criu installed")
    }
    client, err := New(address)
    if err != nil {
        t.Fatal(err)
    }
    defer client.Close()

    var (
        ctx, cancel = testContext()
        id          = t.Name()
    )
    defer cancel()

    image, err := client.GetImage(ctx, testImage)
    if err != nil {
        t.Error(err)
        return
    }
    container, err := client.NewContainer(ctx, id, WithNewSpec(oci.WithImageConfig(image), oci.WithProcessArgs("sleep", "100")), WithNewSnapshot(id, image))
    if err != nil {
        t.Error(err)
        return
    }
    defer container.Delete(ctx, WithSnapshotCleanup)

    task, err := container.NewTask(ctx, empty())
    if err != nil {
        t.Error(err)
        return
    }
    defer task.Delete(ctx)

    statusC, err := task.Wait(ctx)
    if err != nil {
        t.Error(err)
        return
    }

    if err := task.Start(ctx); err != nil {
        t.Error(err)
        return
    }

    if _, err := task.Checkpoint(ctx); err != nil {
        t.Error(err)
        return
    }

    status, err := task.Status(ctx)
    if err != nil {
        t.Error(err)
        return
    }
    if status.Status != Running {
        t.Errorf("expected status %q but received %q", Running, status.Status)
        return
    }

    if err := task.Kill(ctx, syscall.SIGKILL); err != nil {
        t.Error(err)
        return
    }

    <-statusC
}
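Condensing the tests above into one flow: Checkpoint with WithExit dumps the task state via criu and stops it; once the exit event is consumed and the task deleted, the checkpoint image seeds a new task through WithTaskCheckpoint. A sketch under the assumption that cio.NullIO is an acceptable IO creator here; error paths are handled minimally:

package example

import (
    "context"

    "github.com/containerd/containerd"
    "github.com/containerd/containerd/cio"
)

// restoreFromCheckpoint condenses the checkpoint/restore flow exercised above.
func restoreFromCheckpoint(ctx context.Context, container containerd.Container, task containerd.Task) (containerd.Task, error) {
    statusC, err := task.Wait(ctx)
    if err != nil {
        return nil, err
    }
    checkpoint, err := task.Checkpoint(ctx, containerd.WithExit) // criu dump; WithExit stops the task
    if err != nil {
        return nil, err
    }
    <-statusC // wait for the checkpointed task to exit
    if _, err := task.Delete(ctx); err != nil {
        return nil, err
    }
    restored, err := container.NewTask(ctx, cio.NullIO, containerd.WithTaskCheckpoint(checkpoint))
    if err != nil {
        return nil, err
    }
    return restored, restored.Start(ctx) // resumes from the dumped state
}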
1204
vendor/github.com/containerd/containerd/container_linux_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
194
vendor/github.com/containerd/containerd/container_opts.go
generated
vendored
Normal file
194
vendor/github.com/containerd/containerd/container_opts.go
generated
vendored
Normal file
|
@ -0,0 +1,194 @@
|
|||
package containerd
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/containerd/containerd/containers"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/oci"
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/containerd/typeurl"
|
||||
"github.com/gogo/protobuf/types"
|
||||
"github.com/opencontainers/image-spec/identity"
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// DeleteOpts allows the caller to set options for the deletion of a container
|
||||
type DeleteOpts func(ctx context.Context, client *Client, c containers.Container) error
|
||||
|
||||
// NewContainerOpts allows the caller to set additional options when creating a container
|
||||
type NewContainerOpts func(ctx context.Context, client *Client, c *containers.Container) error
|
||||
|
||||
// UpdateContainerOpts allows the caller to set additional options when updating a container
|
||||
type UpdateContainerOpts func(ctx context.Context, client *Client, c *containers.Container) error
|
||||
|
||||
// WithRuntime allows a user to specify the runtime name and additional options that should
|
||||
// be used to create tasks for the container
|
||||
func WithRuntime(name string, options interface{}) NewContainerOpts {
|
||||
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
||||
var (
|
||||
any *types.Any
|
||||
err error
|
||||
)
|
||||
if options != nil {
|
||||
any, err = typeurl.MarshalAny(options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
c.Runtime = containers.RuntimeInfo{
|
||||
Name: name,
|
||||
Options: any,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithImage sets the provided image as the base for the container
|
||||
func WithImage(i Image) NewContainerOpts {
|
||||
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
||||
c.Image = i.Name()
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithContainerLabels adds the provided labels to the container
|
||||
func WithContainerLabels(labels map[string]string) NewContainerOpts {
|
||||
return func(_ context.Context, _ *Client, c *containers.Container) error {
|
||||
c.Labels = labels
|
||||
return nil
|
||||
}
|
||||
}
|
// WithSnapshotter sets the provided snapshotter for use by the container
//
// This option must appear before other snapshotter options to have an effect.
func WithSnapshotter(name string) NewContainerOpts {
	return func(ctx context.Context, client *Client, c *containers.Container) error {
		c.Snapshotter = name
		return nil
	}
}

// WithSnapshot uses an existing root filesystem for the container
func WithSnapshot(id string) NewContainerOpts {
	return func(ctx context.Context, client *Client, c *containers.Container) error {
		setSnapshotterIfEmpty(c)
		// check that the snapshot exists; if not, fail on creation
		if _, err := client.SnapshotService(c.Snapshotter).Mounts(ctx, id); err != nil {
			return err
		}
		c.SnapshotKey = id
		return nil
	}
}

// WithNewSnapshot allocates a new snapshot to be used by the container as the
// root filesystem in read-write mode
func WithNewSnapshot(id string, i Image) NewContainerOpts {
	return func(ctx context.Context, client *Client, c *containers.Container) error {
		diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default())
		if err != nil {
			return err
		}
		setSnapshotterIfEmpty(c)
		parent := identity.ChainID(diffIDs).String()
		if _, err := client.SnapshotService(c.Snapshotter).Prepare(ctx, id, parent); err != nil {
			return err
		}
		c.SnapshotKey = id
		c.Image = i.Name()
		return nil
	}
}

// WithSnapshotCleanup deletes the rootfs snapshot allocated for the container
func WithSnapshotCleanup(ctx context.Context, client *Client, c containers.Container) error {
	if c.SnapshotKey != "" {
		if c.Snapshotter == "" {
			return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Snapshotter must be set to cleanup rootfs snapshot")
		}
		return client.SnapshotService(c.Snapshotter).Remove(ctx, c.SnapshotKey)
	}
	return nil
}

// WithNewSnapshotView allocates a new snapshot to be used by the container as the
// root filesystem in read-only mode
func WithNewSnapshotView(id string, i Image) NewContainerOpts {
	return func(ctx context.Context, client *Client, c *containers.Container) error {
		diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default())
		if err != nil {
			return err
		}
		setSnapshotterIfEmpty(c)
		parent := identity.ChainID(diffIDs).String()
		if _, err := client.SnapshotService(c.Snapshotter).View(ctx, id, parent); err != nil {
			return err
		}
		c.SnapshotKey = id
		c.Image = i.Name()
		return nil
	}
}

func setSnapshotterIfEmpty(c *containers.Container) {
	if c.Snapshotter == "" {
		c.Snapshotter = DefaultSnapshotter
	}
}

// WithContainerExtension appends extension data to the container object.
// Use this to decorate the container object with additional data for the client
// integration.
//
// Make sure to register the type of `extension` in the typeurl package via
// `typeurl.Register` or container creation may fail.
func WithContainerExtension(name string, extension interface{}) NewContainerOpts {
	return func(ctx context.Context, client *Client, c *containers.Container) error {
		if name == "" {
			return errors.Wrapf(errdefs.ErrInvalidArgument, "extension key must not be zero-length")
		}

		any, err := typeurl.MarshalAny(extension)
		if err != nil {
			if errors.Cause(err) == typeurl.ErrNotFound {
				return errors.Wrapf(err, "extension %q is not registered with the typeurl package, see `typeurl.Register`", name)
			}
			return errors.Wrap(err, "error marshalling extension")
		}

		if c.Extensions == nil {
			c.Extensions = make(map[string]types.Any)
		}
		c.Extensions[name] = *any
		return nil
	}
}

// WithNewSpec generates a new spec for a new container
func WithNewSpec(opts ...oci.SpecOpts) NewContainerOpts {
	return func(ctx context.Context, client *Client, c *containers.Container) error {
		s, err := oci.GenerateSpec(ctx, client, c, opts...)
		if err != nil {
			return err
		}
		c.Spec, err = typeurl.MarshalAny(s)
		return err
	}
}

// WithSpec sets the provided spec on the container
func WithSpec(s *specs.Spec, opts ...oci.SpecOpts) NewContainerOpts {
	return func(ctx context.Context, client *Client, c *containers.Container) error {
		for _, o := range opts {
			if err := o(ctx, client, c, s); err != nil {
				return err
			}
		}
		var err error
		c.Spec, err = typeurl.MarshalAny(s)
		return err
	}
}
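These options compose at container-creation time. The following is a usage sketch, not part of the vendored source: it assumes a containerd daemon at the default Linux socket and an image that has already been pulled; the namespace, IDs, and snapshotter name are illustrative.

package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx := namespaces.WithNamespace(context.Background(), "example")
	img, err := client.GetImage(ctx, "docker.io/library/alpine:latest")
	if err != nil {
		log.Fatal(err)
	}

	// WithSnapshotter must come before the snapshot option to take effect.
	container, err := client.NewContainer(ctx, "example",
		containerd.WithSnapshotter("overlayfs"),
		containerd.WithNewSnapshot("example-rootfs", img),
		containerd.WithNewSpec(),
	)
	if err != nil {
		log.Fatal(err)
	}
	// WithSnapshotCleanup satisfies the delete-opt signature, so the rootfs
	// snapshot is removed together with the container record.
	defer container.Delete(ctx, containerd.WithSnapshotCleanup)
}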
220
vendor/github.com/containerd/containerd/container_opts_unix.go
generated
vendored
Normal file
@ -0,0 +1,220 @@
// +build !windows

package containerd

import (
	"context"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"syscall"

	"github.com/containerd/containerd/api/types"
	"github.com/containerd/containerd/containers"
	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/mount"
	"github.com/containerd/containerd/platforms"
	"github.com/gogo/protobuf/proto"
	protobuf "github.com/gogo/protobuf/types"
	digest "github.com/opencontainers/go-digest"
	"github.com/opencontainers/image-spec/identity"
	"github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

// WithCheckpoint allows a container to be created from the checkpointed information
// provided by the descriptor. The image, snapshot, and runtime specifications are
// restored on the container
func WithCheckpoint(im Image, snapshotKey string) NewContainerOpts {
	// set image, rw, and spec
	return func(ctx context.Context, client *Client, c *containers.Container) error {
		var (
			desc  = im.Target()
			id    = desc.Digest
			store = client.ContentStore()
		)
		index, err := decodeIndex(ctx, store, id)
		if err != nil {
			return err
		}
		var rw *v1.Descriptor
		for _, m := range index.Manifests {
			switch m.MediaType {
			case v1.MediaTypeImageLayer:
				fk := m
				rw = &fk
			case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList:
				config, err := images.Config(ctx, store, m, platforms.Default())
				if err != nil {
					return errors.Wrap(err, "unable to resolve image config")
				}
				diffIDs, err := images.RootFS(ctx, store, config)
				if err != nil {
					return errors.Wrap(err, "unable to get rootfs")
				}
				setSnapshotterIfEmpty(c)
				if _, err := client.SnapshotService(c.Snapshotter).Prepare(ctx, snapshotKey, identity.ChainID(diffIDs).String()); err != nil {
					if !errdefs.IsAlreadyExists(err) {
						return err
					}
				}
				c.Image = index.Annotations["image.name"]
			case images.MediaTypeContainerd1CheckpointConfig:
				data, err := content.ReadBlob(ctx, store, m.Digest)
				if err != nil {
					return errors.Wrap(err, "unable to read checkpoint config")
				}
				var any protobuf.Any
				if err := proto.Unmarshal(data, &any); err != nil {
					return err
				}
				c.Spec = &any
			}
		}
		if rw != nil {
			// apply the rw snapshot to the new rw layer
			mounts, err := client.SnapshotService(c.Snapshotter).Mounts(ctx, snapshotKey)
			if err != nil {
				return errors.Wrapf(err, "unable to get mounts for %s", snapshotKey)
			}
			if _, err := client.DiffService().Apply(ctx, *rw, mounts); err != nil {
				return errors.Wrap(err, "unable to apply rw diff")
			}
		}
		c.SnapshotKey = snapshotKey
		return nil
	}
}

// WithTaskCheckpoint allows a task to be created with live runtime and memory data from a
// previous checkpoint. Additional software such as CRIU may be required to
// restore a task from a checkpoint
func WithTaskCheckpoint(im Image) NewTaskOpts {
	return func(ctx context.Context, c *Client, info *TaskInfo) error {
		desc := im.Target()
		id := desc.Digest
		index, err := decodeIndex(ctx, c.ContentStore(), id)
		if err != nil {
			return err
		}
		for _, m := range index.Manifests {
			if m.MediaType == images.MediaTypeContainerd1Checkpoint {
				info.Checkpoint = &types.Descriptor{
					MediaType: m.MediaType,
					Size_:     m.Size,
					Digest:    m.Digest,
				}
				return nil
			}
		}
		return fmt.Errorf("checkpoint not found in index %s", id)
	}
}

func decodeIndex(ctx context.Context, store content.Store, id digest.Digest) (*v1.Index, error) {
	var index v1.Index
	p, err := content.ReadBlob(ctx, store, id)
	if err != nil {
		return nil, err
	}
	if err := json.Unmarshal(p, &index); err != nil {
		return nil, err
	}

	return &index, nil
}

// WithRemappedSnapshot creates a new snapshot and remaps the uid/gid for the
// filesystem to be used by a container with user namespaces
func WithRemappedSnapshot(id string, i Image, uid, gid uint32) NewContainerOpts {
	return withRemappedSnapshotBase(id, i, uid, gid, false)
}

// WithRemappedSnapshotView is similar to WithRemappedSnapshot but rootfs is mounted as read-only.
func WithRemappedSnapshotView(id string, i Image, uid, gid uint32) NewContainerOpts {
	return withRemappedSnapshotBase(id, i, uid, gid, true)
}

func withRemappedSnapshotBase(id string, i Image, uid, gid uint32, readonly bool) NewContainerOpts {
	return func(ctx context.Context, client *Client, c *containers.Container) error {
		diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default())
		if err != nil {
			return err
		}

		setSnapshotterIfEmpty(c)

		var (
			snapshotter = client.SnapshotService(c.Snapshotter)
			parent      = identity.ChainID(diffIDs).String()
			usernsID    = fmt.Sprintf("%s-%d-%d", parent, uid, gid)
		)
		if _, err := snapshotter.Stat(ctx, usernsID); err == nil {
			if _, err := snapshotter.Prepare(ctx, id, usernsID); err == nil {
				c.SnapshotKey = id
				c.Image = i.Name()
				return nil
			} else if !errdefs.IsNotFound(err) {
				return err
			}
		}
		mounts, err := snapshotter.Prepare(ctx, usernsID+"-remap", parent)
		if err != nil {
			return err
		}
		if err := remapRootFS(mounts, uid, gid); err != nil {
			// clean up the active "-remap" snapshot prepared above
			snapshotter.Remove(ctx, usernsID+"-remap")
			return err
		}
		if err := snapshotter.Commit(ctx, usernsID, usernsID+"-remap"); err != nil {
			return err
		}
		if readonly {
			_, err = snapshotter.View(ctx, id, usernsID)
		} else {
			_, err = snapshotter.Prepare(ctx, id, usernsID)
		}
		if err != nil {
			return err
		}
		c.SnapshotKey = id
		c.Image = i.Name()
		return nil
	}
}

func remapRootFS(mounts []mount.Mount, uid, gid uint32) error {
	root, err := ioutil.TempDir("", "ctd-remap")
	if err != nil {
		return err
	}
	defer os.Remove(root)
	for _, m := range mounts {
		if err := m.Mount(root); err != nil {
			return err
		}
	}
	err = filepath.Walk(root, incrementFS(root, uid, gid))
	if uerr := mount.Unmount(root, 0); err == nil {
		err = uerr
	}
	return err
}

func incrementFS(root string, uidInc, gidInc uint32) filepath.WalkFunc {
	return func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		var (
			stat = info.Sys().(*syscall.Stat_t)
			u, g = int(stat.Uid + uidInc), int(stat.Gid + gidInc)
		)
		// be sure to lchown the path so as not to de-reference a symlink to a host file
		return os.Lchown(path, u, g)
	}
}
1623
vendor/github.com/containerd/containerd/container_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
18
vendor/github.com/containerd/containerd/containerd.service
generated
vendored
Normal file
@ -0,0 +1,18 @@
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target

[Service]
ExecStartPre=/sbin/modprobe overlay
ExecStart=/usr/local/bin/containerd
Delegate=yes
KillMode=process
LimitNOFILE=1048576
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNPROC=infinity
LimitCORE=infinity

[Install]
WantedBy=multi-user.target
92
vendor/github.com/containerd/containerd/containers/containers.go
generated
vendored
Normal file
@ -0,0 +1,92 @@
package containers

import (
	"context"
	"time"

	"github.com/gogo/protobuf/types"
)

// Container represents the set of data pinned by a container. Unless otherwise
// noted, the resources here are considered in use by the container.
//
// The resources specified in this object are used to create tasks from the container.
type Container struct {
	// ID uniquely identifies the container in a namespace.
	//
	// This property is required and cannot be changed after creation.
	ID string

	// Labels provide metadata extension for a container.
	//
	// These are optional and fully mutable.
	Labels map[string]string

	// Image specifies the image reference used for a container.
	//
	// This property is optional but immutable.
	Image string

	// Runtime specifies which runtime should be used when launching container
	// tasks.
	//
	// This property is required and immutable.
	Runtime RuntimeInfo

	// Spec should carry the runtime specification used to implement the
	// container.
	//
	// This field is required but mutable.
	Spec *types.Any

	// SnapshotKey specifies the snapshot key to use for the container's root
	// filesystem. When starting a task from this container, a caller should
	// look up the mounts from the snapshot service and include those on the
	// task create request.
	//
	// This field is not required but immutable.
	SnapshotKey string

	// Snapshotter specifies the snapshotter name used for rootfs
	//
	// This field is not required but immutable.
	Snapshotter string

	// CreatedAt is the time at which the container was created.
	CreatedAt time.Time

	// UpdatedAt is the time at which the container was updated.
	UpdatedAt time.Time

	// Extensions stores client-specified metadata
	Extensions map[string]types.Any
}

// RuntimeInfo holds runtime specific information
type RuntimeInfo struct {
	Name    string
	Options *types.Any
}

// Store interacts with the underlying container storage
type Store interface {
	Get(ctx context.Context, id string) (Container, error)

	// List returns containers that match one or more of the provided filters.
	List(ctx context.Context, filters ...string) ([]Container, error)

	// Create a container in the store from the provided container.
	Create(ctx context.Context, container Container) (Container, error)

	// Update the container with the provided container object. ID must be set.
	//
	// If one or more fieldpaths are provided, only the field corresponding to
	// the fieldpaths will be mutated.
	Update(ctx context.Context, container Container, fieldpaths ...string) (Container, error)

	// Delete a container using the id.
	//
	// nil will be returned on success. If the container is not known to the
	// store, ErrNotFound will be returned.
	Delete(ctx context.Context, id string) error
}
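The fieldpath parameter on Update lets a client mutate one field without rewriting the whole record. A hedged sketch of a label-only update against any Store implementation, not part of the vendored source; the label key is a placeholder:

// Sketch: update a single label without touching the rest of the container
// record. store is any containers.Store implementation.
func setOwnerLabel(ctx context.Context, store containers.Store, id, owner string) error {
	c, err := store.Get(ctx, id)
	if err != nil {
		return err
	}
	if c.Labels == nil {
		c.Labels = map[string]string{}
	}
	c.Labels["owner"] = owner
	// restrict the mutation to the one label we changed
	_, err = store.Update(ctx, c, "labels.owner")
	return err
}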
135
vendor/github.com/containerd/containerd/containerstore.go
generated
vendored
Normal file
@ -0,0 +1,135 @@
package containerd

import (
	"context"

	containersapi "github.com/containerd/containerd/api/services/containers/v1"
	"github.com/containerd/containerd/containers"
	"github.com/containerd/containerd/errdefs"
	ptypes "github.com/gogo/protobuf/types"
)

type remoteContainers struct {
	client containersapi.ContainersClient
}

var _ containers.Store = &remoteContainers{}

// NewRemoteContainerStore returns the container Store connected with the provided client
func NewRemoteContainerStore(client containersapi.ContainersClient) containers.Store {
	return &remoteContainers{
		client: client,
	}
}

func (r *remoteContainers) Get(ctx context.Context, id string) (containers.Container, error) {
	resp, err := r.client.Get(ctx, &containersapi.GetContainerRequest{
		ID: id,
	})
	if err != nil {
		return containers.Container{}, errdefs.FromGRPC(err)
	}

	return containerFromProto(&resp.Container), nil
}

func (r *remoteContainers) List(ctx context.Context, filters ...string) ([]containers.Container, error) {
	resp, err := r.client.List(ctx, &containersapi.ListContainersRequest{
		Filters: filters,
	})
	if err != nil {
		return nil, errdefs.FromGRPC(err)
	}

	return containersFromProto(resp.Containers), nil
}

func (r *remoteContainers) Create(ctx context.Context, container containers.Container) (containers.Container, error) {
	created, err := r.client.Create(ctx, &containersapi.CreateContainerRequest{
		Container: containerToProto(&container),
	})
	if err != nil {
		return containers.Container{}, errdefs.FromGRPC(err)
	}

	return containerFromProto(&created.Container), nil
}

func (r *remoteContainers) Update(ctx context.Context, container containers.Container, fieldpaths ...string) (containers.Container, error) {
	var updateMask *ptypes.FieldMask
	if len(fieldpaths) > 0 {
		updateMask = &ptypes.FieldMask{
			Paths: fieldpaths,
		}
	}

	updated, err := r.client.Update(ctx, &containersapi.UpdateContainerRequest{
		Container:  containerToProto(&container),
		UpdateMask: updateMask,
	})
	if err != nil {
		return containers.Container{}, errdefs.FromGRPC(err)
	}

	return containerFromProto(&updated.Container), nil
}

func (r *remoteContainers) Delete(ctx context.Context, id string) error {
	_, err := r.client.Delete(ctx, &containersapi.DeleteContainerRequest{
		ID: id,
	})

	return errdefs.FromGRPC(err)
}

func containerToProto(container *containers.Container) containersapi.Container {
	return containersapi.Container{
		ID:     container.ID,
		Labels: container.Labels,
		Image:  container.Image,
		Runtime: &containersapi.Container_Runtime{
			Name:    container.Runtime.Name,
			Options: container.Runtime.Options,
		},
		Spec:        container.Spec,
		Snapshotter: container.Snapshotter,
		SnapshotKey: container.SnapshotKey,
		Extensions:  container.Extensions,
	}
}

func containerFromProto(containerpb *containersapi.Container) containers.Container {
	var runtime containers.RuntimeInfo
	if containerpb.Runtime != nil {
		runtime = containers.RuntimeInfo{
			Name:    containerpb.Runtime.Name,
			Options: containerpb.Runtime.Options,
		}
	}
	return containers.Container{
		ID:          containerpb.ID,
		Labels:      containerpb.Labels,
		Image:       containerpb.Image,
		Runtime:     runtime,
		Spec:        containerpb.Spec,
		Snapshotter: containerpb.Snapshotter,
		SnapshotKey: containerpb.SnapshotKey,
		CreatedAt:   containerpb.CreatedAt,
		UpdatedAt:   containerpb.UpdatedAt,
		Extensions:  containerpb.Extensions,
	}
}

func containersFromProto(containerspb []containersapi.Container) []containers.Container {
	var containers []containers.Container

	for _, container := range containerspb {
		containers = append(containers, containerFromProto(&container))
	}

	return containers
}
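The remote store is a thin shim over the containers gRPC service: every method marshals to the API types above and maps errors back through errdefs.FromGRPC. A hedged wiring sketch, not part of the vendored source; the dial target and options are illustrative, and containerd clients normally manage this connection themselves:

// Sketch: connect the Store shim to the containers service over an existing
// gRPC connection (local socket, so no TLS in this illustration).
func dialContainerStore() (containers.Store, error) {
	conn, err := grpc.Dial("unix:///run/containerd/containerd.sock", grpc.WithInsecure())
	if err != nil {
		return nil, err
	}
	return containerd.NewRemoteContainerStore(containersapi.NewContainersClient(conn)), nil
}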
128
vendor/github.com/containerd/containerd/content/content.go
generated
vendored
Normal file
@ -0,0 +1,128 @@
package content

import (
	"context"
	"io"
	"time"

	"github.com/opencontainers/go-digest"
)

// ReaderAt extends the standard io.ReaderAt interface with reporting of Size and io.Closer
type ReaderAt interface {
	io.ReaderAt
	io.Closer
	Size() int64
}

// Provider provides a reader interface for specific content
type Provider interface {
	ReaderAt(ctx context.Context, dgst digest.Digest) (ReaderAt, error)
}

// Ingester writes content
type Ingester interface {
	Writer(ctx context.Context, ref string, size int64, expected digest.Digest) (Writer, error)
}

// Info holds content specific information
//
// TODO(stevvooe): Consider a very different name for this struct. Info is way
// too general. It also reads very weirdly in certain contexts, like pluralization.
type Info struct {
	Digest    digest.Digest
	Size      int64
	CreatedAt time.Time
	UpdatedAt time.Time
	Labels    map[string]string
}

// Status of a content operation
type Status struct {
	Ref       string
	Offset    int64
	Total     int64
	Expected  digest.Digest
	StartedAt time.Time
	UpdatedAt time.Time
}

// WalkFunc defines the callback for a blob walk.
type WalkFunc func(Info) error

// Manager provides methods for inspecting, listing and removing content.
type Manager interface {
	// Info will return metadata about content available in the content store.
	//
	// If the content is not present, ErrNotFound will be returned.
	Info(ctx context.Context, dgst digest.Digest) (Info, error)

	// Update updates mutable information related to content.
	// If one or more fieldpaths are provided, only those
	// fields will be updated.
	// Mutable fields:
	//  labels.*
	Update(ctx context.Context, info Info, fieldpaths ...string) (Info, error)

	// Walk will call fn for each item in the content store which
	// matches the provided filters. If no filters are given all
	// items will be walked.
	Walk(ctx context.Context, fn WalkFunc, filters ...string) error

	// Delete removes the content from the store.
	Delete(ctx context.Context, dgst digest.Digest) error
}

// IngestManager provides methods for managing ingests.
type IngestManager interface {
	// Status returns the status of the provided ref.
	Status(ctx context.Context, ref string) (Status, error)

	// ListStatuses returns the status of any active ingestions whose ref match the
	// provided regular expression. If empty, all active ingestions will be
	// returned.
	ListStatuses(ctx context.Context, filters ...string) ([]Status, error)

	// Abort completely cancels the ingest operation targeted by ref.
	Abort(ctx context.Context, ref string) error
}

// Writer handles the write of content into a content store
type Writer interface {
	// Close is expected to be called after Commit() when the write is to be
	// kept. Closing a writer without commit allows resuming or aborting.
	io.WriteCloser

	// Digest may return empty digest or panics until committed.
	Digest() digest.Digest

	// Commit commits the blob (but no roll-back is guaranteed on an error).
	// size and expected can be zero-value when unknown.
	Commit(ctx context.Context, size int64, expected digest.Digest, opts ...Opt) error

	// Status returns the current state of write
	Status() (Status, error)

	// Truncate updates the size of the target blob
	Truncate(size int64) error
}

// Store combines the methods of content-oriented interfaces into a set that
// are commonly provided by complete implementations.
type Store interface {
	Manager
	Provider
	IngestManager
	Ingester
}

// Opt is used to alter the mutable properties of content
type Opt func(*Info) error

// WithLabels allows labels to be set on content
func WithLabels(labels map[string]string) Opt {
	return func(info *Info) error {
		info.Labels = labels
		return nil
	}
}
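Labels are the only mutable piece of content metadata, so they appear both as a Commit-time Opt and as Update fieldpaths. A hedged sketch, not part of the vendored source; cs is any content.Store and the label key is a placeholder:

// Sketch: set a label at commit time through the Opt parameter, then mutate
// only that label via Update's fieldpath form.
func labelBlob(ctx context.Context, cs content.Store, data []byte) error {
	dgst := digest.FromBytes(data)
	cw, err := cs.Writer(ctx, "label-example", int64(len(data)), dgst)
	if err != nil {
		return err
	}
	defer cw.Close()
	if _, err := cw.Write(data); err != nil {
		return err
	}
	if err := cw.Commit(ctx, int64(len(data)), dgst,
		content.WithLabels(map[string]string{"example.com/keep": "yes"})); err != nil {
		return err
	}
	_, err = cs.Update(ctx, content.Info{
		Digest: dgst,
		Labels: map[string]string{"example.com/keep": "no"},
	}, "labels.example.com/keep") // only this label is mutated
	return err
}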
139
vendor/github.com/containerd/containerd/content/helpers.go
generated
vendored
Normal file
@ -0,0 +1,139 @@
package content

import (
	"context"
	"io"
	"io/ioutil"
	"sync"

	"github.com/containerd/containerd/errdefs"
	"github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
)

var bufPool = sync.Pool{
	New: func() interface{} {
		buffer := make([]byte, 1<<20)
		return &buffer
	},
}

// NewReader returns an io.Reader from a ReaderAt
func NewReader(ra ReaderAt) io.Reader {
	rd := io.NewSectionReader(ra, 0, ra.Size())
	return rd
}

// ReadBlob retrieves the entire contents of the blob from the provider.
//
// Avoid using this for large blobs, such as layers.
func ReadBlob(ctx context.Context, provider Provider, dgst digest.Digest) ([]byte, error) {
	ra, err := provider.ReaderAt(ctx, dgst)
	if err != nil {
		return nil, err
	}
	defer ra.Close()

	p := make([]byte, ra.Size())

	_, err = ra.ReadAt(p, 0)
	return p, err
}

// WriteBlob writes data with the expected digest into the content store. If
// expected already exists, the method returns immediately and the reader will
// not be consumed.
//
// This is useful when the digest and size are known beforehand.
//
// Copy is buffered, so no need to wrap reader in buffered io.
func WriteBlob(ctx context.Context, cs Ingester, ref string, r io.Reader, size int64, expected digest.Digest, opts ...Opt) error {
	cw, err := cs.Writer(ctx, ref, size, expected)
	if err != nil {
		if !errdefs.IsAlreadyExists(err) {
			return err
		}

		return nil // already present
	}
	defer cw.Close()

	return Copy(ctx, cw, r, size, expected, opts...)
}

// Copy copies data with the expected digest from the reader into the
// provided content store writer.
//
// This is useful when the digest and size are known beforehand. When
// the size or digest is unknown, these values may be empty.
//
// Copy is buffered, so no need to wrap reader in buffered io.
func Copy(ctx context.Context, cw Writer, r io.Reader, size int64, expected digest.Digest, opts ...Opt) error {
	ws, err := cw.Status()
	if err != nil {
		return err
	}

	if ws.Offset > 0 {
		r, err = seekReader(r, ws.Offset, size)
		if err != nil {
			return errors.Wrapf(err, "unable to resume write to %v", ws.Ref)
		}
	}

	buf := bufPool.Get().(*[]byte)
	defer bufPool.Put(buf)

	if _, err := io.CopyBuffer(cw, r, *buf); err != nil {
		return err
	}

	if err := cw.Commit(ctx, size, expected, opts...); err != nil {
		if !errdefs.IsAlreadyExists(err) {
			return errors.Wrapf(err, "failed commit on ref %q", ws.Ref)
		}
	}

	return nil
}

// seekReader attempts to seek the reader to the given offset, either by
// resolving `io.Seeker`, by detecting `io.ReaderAt`, or discarding
// up to the given offset.
func seekReader(r io.Reader, offset, size int64) (io.Reader, error) {
	// attempt to resolve r as a seeker and setup the offset.
	seeker, ok := r.(io.Seeker)
	if ok {
		nn, err := seeker.Seek(offset, io.SeekStart)
		if nn != offset {
			return nil, errors.Wrapf(err, "failed to seek to offset %v", offset)
		}

		if err != nil {
			return nil, err
		}

		return r, nil
	}

	// ok, let's try io.ReaderAt!
	readerAt, ok := r.(io.ReaderAt)
	if ok && size > offset {
		sr := io.NewSectionReader(readerAt, offset, size)
		return sr, nil
	}

	// well then, let's just discard up to the offset
	buf := bufPool.Get().(*[]byte)
	defer bufPool.Put(buf)

	n, err := io.CopyBuffer(ioutil.Discard, io.LimitReader(r, offset), *buf)
	if err != nil {
		return nil, errors.Wrap(err, "failed to discard to offset")
	}
	if n != offset {
		return nil, errors.Errorf("unable to discard to offset")
	}

	return r, nil
}
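WriteBlob and ReadBlob pair naturally for small, config-sized blobs. A hedged round-trip sketch, not part of the vendored source; cs is any content.Store and the ref string only names the in-flight ingest:

// Sketch: store a small blob and read it back by digest.
func roundTrip(ctx context.Context, cs content.Store) error {
	data := []byte(`{"hello":"world"}`)
	dgst := digest.FromBytes(data)
	if err := content.WriteBlob(ctx, cs, "example-ingest-ref",
		bytes.NewReader(data), int64(len(data)), dgst); err != nil {
		return err
	}
	back, err := content.ReadBlob(ctx, cs, dgst) // avoid for layer-sized blobs
	if err != nil {
		return err
	}
	fmt.Println(bytes.Equal(back, data)) // true
	return nil
}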
112
vendor/github.com/containerd/containerd/content/helpers_test.go
generated
vendored
Normal file
@ -0,0 +1,112 @@
package content

import (
	"bytes"
	"context"
	"io"
	"strings"
	"testing"

	"github.com/containerd/containerd/errdefs"
	"github.com/opencontainers/go-digest"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

type copySource struct {
	reader io.Reader
	size   int64
	digest digest.Digest
}

func TestCopy(t *testing.T) {
	defaultSource := newCopySource("this is the source to copy")

	var testcases = []struct {
		name     string
		source   copySource
		writer   fakeWriter
		expected string
	}{
		{
			name:     "copy no offset",
			source:   defaultSource,
			writer:   fakeWriter{},
			expected: "this is the source to copy",
		},
		{
			name:     "copy with offset from seeker",
			source:   defaultSource,
			writer:   fakeWriter{status: Status{Offset: 8}},
			expected: "the source to copy",
		},
		{
			name:     "copy with offset from unseekable source",
			source:   copySource{reader: bytes.NewBufferString("foobar"), size: 6},
			writer:   fakeWriter{status: Status{Offset: 3}},
			expected: "bar",
		},
		{
			name:   "commit already exists",
			source: defaultSource,
			writer: fakeWriter{commitFunc: func() error {
				return errdefs.ErrAlreadyExists
			}},
		},
	}

	for _, testcase := range testcases {
		t.Run(testcase.name, func(t *testing.T) {
			err := Copy(context.Background(),
				&testcase.writer,
				testcase.source.reader,
				testcase.source.size,
				testcase.source.digest)

			require.NoError(t, err)
			assert.Equal(t, testcase.source.digest, testcase.writer.committedDigest)
			assert.Equal(t, testcase.expected, testcase.writer.String())
		})
	}
}

func newCopySource(raw string) copySource {
	return copySource{
		reader: strings.NewReader(raw),
		size:   int64(len(raw)),
		digest: digest.FromBytes([]byte(raw)),
	}
}

type fakeWriter struct {
	bytes.Buffer
	committedDigest digest.Digest
	status          Status
	commitFunc      func() error
}

func (f *fakeWriter) Close() error {
	f.Buffer.Reset()
	return nil
}

func (f *fakeWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...Opt) error {
	f.committedDigest = expected
	if f.commitFunc == nil {
		return nil
	}
	return f.commitFunc()
}

func (f *fakeWriter) Digest() digest.Digest {
	return f.committedDigest
}

func (f *fakeWriter) Status() (Status, error) {
	return f.status, nil
}

func (f *fakeWriter) Truncate(size int64) error {
	f.Buffer.Truncate(int(size))
	return nil
}
49
vendor/github.com/containerd/containerd/content_reader.go
generated
vendored
Normal file
@ -0,0 +1,49 @@
package containerd

import (
	"context"

	contentapi "github.com/containerd/containerd/api/services/content/v1"
	digest "github.com/opencontainers/go-digest"
)

type remoteReaderAt struct {
	ctx    context.Context
	digest digest.Digest
	size   int64
	client contentapi.ContentClient
}

func (ra *remoteReaderAt) Size() int64 {
	return ra.size
}

func (ra *remoteReaderAt) ReadAt(p []byte, off int64) (n int, err error) {
	rr := &contentapi.ReadContentRequest{
		Digest: ra.digest,
		Offset: off,
		Size_:  int64(len(p)),
	}
	rc, err := ra.client.Read(ra.ctx, rr)
	if err != nil {
		return 0, err
	}

	for len(p) > 0 {
		var resp *contentapi.ReadContentResponse
		// fill our buffer up until we can fill p.
		resp, err = rc.Recv()
		if err != nil {
			return n, err
		}

		copied := copy(p, resp.Data)
		n += copied
		p = p[copied:]
	}
	return n, nil
}

func (ra *remoteReaderAt) Close() error {
	return nil
}
208
vendor/github.com/containerd/containerd/content_store.go
generated
vendored
Normal file
@ -0,0 +1,208 @@
package containerd

import (
	"context"
	"io"

	contentapi "github.com/containerd/containerd/api/services/content/v1"
	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/errdefs"
	protobuftypes "github.com/gogo/protobuf/types"
	digest "github.com/opencontainers/go-digest"
)

type remoteContent struct {
	client contentapi.ContentClient
}

// NewContentStoreFromClient returns a new content store
func NewContentStoreFromClient(client contentapi.ContentClient) content.Store {
	return &remoteContent{
		client: client,
	}
}

func (rs *remoteContent) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
	resp, err := rs.client.Info(ctx, &contentapi.InfoRequest{
		Digest: dgst,
	})
	if err != nil {
		return content.Info{}, errdefs.FromGRPC(err)
	}

	return infoFromGRPC(resp.Info), nil
}

func (rs *remoteContent) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error {
	session, err := rs.client.List(ctx, &contentapi.ListContentRequest{
		Filters: filters,
	})
	if err != nil {
		return errdefs.FromGRPC(err)
	}

	for {
		msg, err := session.Recv()
		if err != nil {
			if err != io.EOF {
				return errdefs.FromGRPC(err)
			}

			break
		}

		for _, info := range msg.Info {
			if err := fn(infoFromGRPC(info)); err != nil {
				return err
			}
		}
	}

	return nil
}

func (rs *remoteContent) Delete(ctx context.Context, dgst digest.Digest) error {
	if _, err := rs.client.Delete(ctx, &contentapi.DeleteContentRequest{
		Digest: dgst,
	}); err != nil {
		return errdefs.FromGRPC(err)
	}

	return nil
}

func (rs *remoteContent) ReaderAt(ctx context.Context, dgst digest.Digest) (content.ReaderAt, error) {
	i, err := rs.Info(ctx, dgst)
	if err != nil {
		return nil, err
	}

	return &remoteReaderAt{
		ctx:    ctx,
		digest: dgst,
		size:   i.Size,
		client: rs.client,
	}, nil
}

func (rs *remoteContent) Status(ctx context.Context, ref string) (content.Status, error) {
	resp, err := rs.client.Status(ctx, &contentapi.StatusRequest{
		Ref: ref,
	})
	if err != nil {
		return content.Status{}, errdefs.FromGRPC(err)
	}

	status := resp.Status
	return content.Status{
		Ref:       status.Ref,
		StartedAt: status.StartedAt,
		UpdatedAt: status.UpdatedAt,
		Offset:    status.Offset,
		Total:     status.Total,
		Expected:  status.Expected,
	}, nil
}

func (rs *remoteContent) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) {
	resp, err := rs.client.Update(ctx, &contentapi.UpdateRequest{
		Info: infoToGRPC(info),
		UpdateMask: &protobuftypes.FieldMask{
			Paths: fieldpaths,
		},
	})
	if err != nil {
		return content.Info{}, errdefs.FromGRPC(err)
	}
	return infoFromGRPC(resp.Info), nil
}

func (rs *remoteContent) ListStatuses(ctx context.Context, filters ...string) ([]content.Status, error) {
	resp, err := rs.client.ListStatuses(ctx, &contentapi.ListStatusesRequest{
		Filters: filters,
	})
	if err != nil {
		return nil, errdefs.FromGRPC(err)
	}

	var statuses []content.Status
	for _, status := range resp.Statuses {
		statuses = append(statuses, content.Status{
			Ref:       status.Ref,
			StartedAt: status.StartedAt,
			UpdatedAt: status.UpdatedAt,
			Offset:    status.Offset,
			Total:     status.Total,
			Expected:  status.Expected,
		})
	}

	return statuses, nil
}

func (rs *remoteContent) Writer(ctx context.Context, ref string, size int64, expected digest.Digest) (content.Writer, error) {
	wrclient, offset, err := rs.negotiate(ctx, ref, size, expected)
	if err != nil {
		return nil, errdefs.FromGRPC(err)
	}

	return &remoteWriter{
		ref:    ref,
		client: wrclient,
		offset: offset,
	}, nil
}

// Abort implements asynchronous abort. It cancels the ingest operation
// identified by ref on the server side.
func (rs *remoteContent) Abort(ctx context.Context, ref string) error {
	if _, err := rs.client.Abort(ctx, &contentapi.AbortRequest{
		Ref: ref,
	}); err != nil {
		return errdefs.FromGRPC(err)
	}

	return nil
}

func (rs *remoteContent) negotiate(ctx context.Context, ref string, size int64, expected digest.Digest) (contentapi.Content_WriteClient, int64, error) {
	wrclient, err := rs.client.Write(ctx)
	if err != nil {
		return nil, 0, err
	}

	if err := wrclient.Send(&contentapi.WriteContentRequest{
		Action:   contentapi.WriteActionStat,
		Ref:      ref,
		Total:    size,
		Expected: expected,
	}); err != nil {
		return nil, 0, err
	}

	resp, err := wrclient.Recv()
	if err != nil {
		return nil, 0, err
	}

	return wrclient, resp.Offset, nil
}

func infoToGRPC(info content.Info) contentapi.Info {
	return contentapi.Info{
		Digest:    info.Digest,
		Size_:     info.Size,
		CreatedAt: info.CreatedAt,
		UpdatedAt: info.UpdatedAt,
		Labels:    info.Labels,
	}
}

func infoFromGRPC(info contentapi.Info) content.Info {
	return content.Info{
		Digest:    info.Digest,
		Size:      info.Size_,
		CreatedAt: info.CreatedAt,
		UpdatedAt: info.UpdatedAt,
		Labels:    info.Labels,
	}
}
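The write negotiation above always opens a session with a stat action, so a client that retries an interrupted upload under the same ref starts at the server-reported offset rather than byte zero; content.Copy then fast-forwards the reader to match. A hedged sketch, not part of the vendored source; cs is the store returned by NewContentStoreFromClient:

// Sketch: a retried ingest under the same ref resumes where the server left off.
func resumableIngest(ctx context.Context, cs content.Store, r io.Reader, size int64, expected digest.Digest) error {
	cw, err := cs.Writer(ctx, "resumable-ref", size, expected)
	if err != nil {
		return err
	}
	defer cw.Close()
	// Copy consults cw.Status().Offset and seeks/discards r before writing.
	return content.Copy(ctx, cw, r, size, expected)
}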
54
vendor/github.com/containerd/containerd/content_test.go
generated
vendored
Normal file
@ -0,0 +1,54 @@
package containerd

import (
	"context"
	"testing"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/content/testsuite"
	"github.com/containerd/containerd/errdefs"
	"github.com/pkg/errors"
)

func newContentStore(ctx context.Context, root string) (context.Context, content.Store, func() error, error) {
	client, err := New(address)
	if err != nil {
		return nil, nil, nil, err
	}
	ctx, releaseLease, err := client.WithLease(ctx)
	if err != nil {
		return nil, nil, nil, err
	}
	cs := client.ContentStore()

	return ctx, cs, func() error {
		statuses, err := cs.ListStatuses(ctx)
		if err != nil {
			return err
		}
		for _, st := range statuses {
			if err := cs.Abort(ctx, st.Ref); err != nil {
				return errors.Wrapf(err, "failed to abort %s", st.Ref)
			}
		}
		releaseLease()
		return cs.Walk(ctx, func(info content.Info) error {
			if err := cs.Delete(ctx, info.Digest); err != nil {
				if errdefs.IsNotFound(err) {
					return nil
				}

				return err
			}
			return nil
		})
	}, nil
}

func TestContentClient(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	testsuite.ContentSuite(t, "ContentClient", newContentStore)
}
123
vendor/github.com/containerd/containerd/content_writer.go
generated
vendored
Normal file
@ -0,0 +1,123 @@
package containerd

import (
	"context"
	"io"

	contentapi "github.com/containerd/containerd/api/services/content/v1"
	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/errdefs"
	digest "github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
)

type remoteWriter struct {
	ref    string
	client contentapi.Content_WriteClient
	offset int64
	digest digest.Digest
}

// send performs a synchronous req-resp cycle on the client.
func (rw *remoteWriter) send(req *contentapi.WriteContentRequest) (*contentapi.WriteContentResponse, error) {
	if err := rw.client.Send(req); err != nil {
		return nil, err
	}

	resp, err := rw.client.Recv()

	if err == nil {
		// try to keep these in sync
		if resp.Digest != "" {
			rw.digest = resp.Digest
		}
	}

	return resp, err
}

func (rw *remoteWriter) Status() (content.Status, error) {
	resp, err := rw.send(&contentapi.WriteContentRequest{
		Action: contentapi.WriteActionStat,
	})
	if err != nil {
		return content.Status{}, errors.Wrap(err, "error getting writer status")
	}

	return content.Status{
		Ref:       rw.ref,
		Offset:    resp.Offset,
		Total:     resp.Total,
		StartedAt: resp.StartedAt,
		UpdatedAt: resp.UpdatedAt,
	}, nil
}

func (rw *remoteWriter) Digest() digest.Digest {
	return rw.digest
}

func (rw *remoteWriter) Write(p []byte) (n int, err error) {
	offset := rw.offset

	resp, err := rw.send(&contentapi.WriteContentRequest{
		Action: contentapi.WriteActionWrite,
		Offset: offset,
		Data:   p,
	})
	if err != nil {
		return 0, err
	}

	n = int(resp.Offset - offset)
	if n < len(p) {
		err = io.ErrShortWrite
	}

	rw.offset += int64(n)
	if resp.Digest != "" {
		rw.digest = resp.Digest
	}
	return
}

func (rw *remoteWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
	var base content.Info
	for _, opt := range opts {
		if err := opt(&base); err != nil {
			return err
		}
	}
	resp, err := rw.send(&contentapi.WriteContentRequest{
		Action:   contentapi.WriteActionCommit,
		Total:    size,
		Offset:   rw.offset,
		Expected: expected,
		Labels:   base.Labels,
	})
	if err != nil {
		return errdefs.FromGRPC(err)
	}

	if size != 0 && resp.Offset != size {
		return errors.Errorf("unexpected size: %v != %v", resp.Offset, size)
	}

	if expected != "" && resp.Digest != expected {
		return errors.Errorf("unexpected digest: %v != %v", resp.Digest, expected)
	}

	rw.digest = resp.Digest
	rw.offset = resp.Offset
	return nil
}

func (rw *remoteWriter) Truncate(size int64) error {
	// This truncation won't actually be validated until a write is issued.
	rw.offset = size
	return nil
}

func (rw *remoteWriter) Close() error {
	return rw.client.CloseSend()
}
11
vendor/github.com/containerd/containerd/contrib/README.md
generated
vendored
Normal file
@ -0,0 +1,11 @@
# contrib

The `contrib` directory contains packages that do not belong in the core containerd packages but still contribute to overall containerd usability.

Packages such as AppArmor or SELinux are placed in `contrib` because they are platform dependent and often require higher level tools and profiles to work.

Packaging and other build tools can be added to `contrib` to aid in packaging containerd for various distributions.

## Testing

Code in the `contrib` directory may or may not have been tested in the normal test pipeline for core components.
40
vendor/github.com/containerd/containerd/contrib/seccomp/seccomp.go
generated
vendored
Normal file
@ -0,0 +1,40 @@
// +build linux

package seccomp

import (
	"context"
	"encoding/json"
	"fmt"
	"io/ioutil"

	"github.com/containerd/containerd/containers"
	"github.com/containerd/containerd/oci"
	"github.com/opencontainers/runtime-spec/specs-go"
)

// WithProfile receives the name of a file stored on disk comprising a json
// formatted seccomp profile, as specified by the opencontainers/runtime-spec.
// The profile is read from the file, unmarshaled, and set to the spec.
func WithProfile(profile string) oci.SpecOpts {
	return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
		s.Linux.Seccomp = &specs.LinuxSeccomp{}
		f, err := ioutil.ReadFile(profile)
		if err != nil {
			return fmt.Errorf("cannot load seccomp profile %q: %v", profile, err)
		}
		if err := json.Unmarshal(f, s.Linux.Seccomp); err != nil {
			return fmt.Errorf("decoding seccomp profile failed %q: %v", profile, err)
		}
		return nil
	}
}

// WithDefaultProfile sets the default seccomp profile to the spec.
// Note: must follow the setting of process capabilities
func WithDefaultProfile() oci.SpecOpts {
	return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
		s.Linux.Seccomp = DefaultProfile(s)
		return nil
	}
}
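Because DefaultProfile derives its whitelist from the capabilities already present in the spec, the ordering note above matters: the seccomp opt must run after the capability opts. A hedged sketch, not part of the vendored source; setCaps stands in for whatever capability-setting oci.SpecOpts the caller uses (a hypothetical name), and client/img come from an existing session:

// Sketch: keep seccomp as the last spec opt so it sees the final capability set.
func newSandboxedContainer(ctx context.Context, client *containerd.Client, img containerd.Image, setCaps oci.SpecOpts) (containerd.Container, error) {
	return client.NewContainer(ctx, "seccomp-example",
		containerd.WithNewSnapshot("seccomp-rootfs", img),
		containerd.WithNewSpec(
			setCaps,                      // adjust Process.Capabilities first
			seccomp.WithDefaultProfile(), // then derive the whitelist from them
		),
	)
}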
581
vendor/github.com/containerd/containerd/contrib/seccomp/seccomp_default.go
generated
vendored
Normal file
@ -0,0 +1,581 @@
// +build linux
|
||||
|
||||
package seccomp
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"syscall"
|
||||
|
||||
"github.com/opencontainers/runtime-spec/specs-go"
|
||||
)
|
||||
|
||||
func arches() []specs.Arch {
|
||||
switch runtime.GOARCH {
|
||||
case "amd64":
|
||||
return []specs.Arch{specs.ArchX86_64, specs.ArchX86, specs.ArchX32}
|
||||
case "arm64":
|
||||
return []specs.Arch{specs.ArchARM, specs.ArchAARCH64}
|
||||
case "mips64":
|
||||
return []specs.Arch{specs.ArchMIPS, specs.ArchMIPS64, specs.ArchMIPS64N32}
|
||||
case "mips64n32":
|
||||
return []specs.Arch{specs.ArchMIPS, specs.ArchMIPS64, specs.ArchMIPS64N32}
|
||||
case "mipsel64":
|
||||
return []specs.Arch{specs.ArchMIPSEL, specs.ArchMIPSEL64, specs.ArchMIPSEL64N32}
|
||||
case "mipsel64n32":
|
||||
return []specs.Arch{specs.ArchMIPSEL, specs.ArchMIPSEL64, specs.ArchMIPSEL64N32}
|
||||
case "s390x":
|
||||
return []specs.Arch{specs.ArchS390, specs.ArchS390X}
|
||||
default:
|
||||
return []specs.Arch{}
|
||||
}
|
||||
}
|
||||
|
||||
// DefaultProfile defines the whitelist for the default seccomp profile.
|
||||
func DefaultProfile(sp *specs.Spec) *specs.LinuxSeccomp {
|
||||
syscalls := []specs.LinuxSyscall{
|
||||
{
|
||||
Names: []string{
|
||||
"accept",
|
||||
"accept4",
|
||||
"access",
|
||||
"alarm",
|
||||
"alarm",
|
||||
"bind",
|
||||
"brk",
|
||||
"capget",
|
||||
"capset",
|
||||
"chdir",
|
||||
"chmod",
|
||||
"chown",
|
||||
"chown32",
|
||||
"clock_getres",
|
||||
"clock_gettime",
|
||||
"clock_nanosleep",
|
||||
"close",
|
||||
"connect",
|
||||
"copy_file_range",
|
||||
"creat",
|
||||
"dup",
|
||||
"dup2",
|
||||
"dup3",
|
||||
"epoll_create",
|
||||
"epoll_create1",
|
||||
"epoll_ctl",
|
||||
"epoll_ctl_old",
|
||||
"epoll_pwait",
|
||||
"epoll_wait",
|
||||
"epoll_wait_old",
|
||||
"eventfd",
|
||||
"eventfd2",
|
||||
"execve",
|
||||
"execveat",
|
||||
"exit",
|
||||
"exit_group",
|
||||
"faccessat",
|
||||
"fadvise64",
|
||||
"fadvise64_64",
|
||||
"fallocate",
|
||||
"fanotify_mark",
|
||||
"fchdir",
|
||||
"fchmod",
|
||||
"fchmodat",
|
||||
"fchown",
|
||||
"fchown32",
|
||||
"fchownat",
|
||||
"fcntl",
|
||||
"fcntl64",
|
||||
"fdatasync",
|
||||
"fgetxattr",
|
||||
"flistxattr",
|
||||
"flock",
|
||||
"fork",
|
||||
"fremovexattr",
|
||||
"fsetxattr",
|
||||
"fstat",
|
||||
"fstat64",
|
||||
"fstatat64",
|
||||
"fstatfs",
|
||||
"fstatfs64",
|
||||
"fsync",
|
||||
"ftruncate",
|
||||
"ftruncate64",
|
||||
"futex",
|
||||
"futimesat",
|
||||
"getcpu",
|
||||
"getcwd",
|
||||
"getdents",
|
||||
"getdents64",
|
||||
"getegid",
|
||||
"getegid32",
|
||||
"geteuid",
|
||||
"geteuid32",
|
||||
"getgid",
|
||||
"getgid32",
|
||||
"getgroups",
|
||||
"getgroups32",
|
||||
"getitimer",
|
||||
"getpeername",
|
||||
"getpgid",
|
||||
"getpgrp",
|
||||
"getpid",
|
||||
"getppid",
|
||||
"getpriority",
|
||||
"getrandom",
|
||||
"getresgid",
|
||||
"getresgid32",
|
||||
"getresuid",
|
||||
"getresuid32",
|
||||
"getrlimit",
|
||||
"get_robust_list",
|
||||
"getrusage",
|
||||
"getsid",
|
||||
"getsockname",
|
||||
"getsockopt",
|
||||
"get_thread_area",
|
||||
"gettid",
|
||||
"gettimeofday",
|
||||
"getuid",
|
||||
"getuid32",
|
||||
"getxattr",
|
||||
"inotify_add_watch",
|
||||
"inotify_init",
|
||||
"inotify_init1",
|
||||
"inotify_rm_watch",
|
||||
"io_cancel",
|
||||
"ioctl",
|
||||
"io_destroy",
|
||||
"io_getevents",
|
||||
"ioprio_get",
|
||||
"ioprio_set",
|
||||
"io_setup",
|
||||
"io_submit",
|
||||
"ipc",
|
||||
"kill",
|
||||
"lchown",
|
||||
"lchown32",
|
||||
"lgetxattr",
|
||||
"link",
|
||||
"linkat",
|
||||
"listen",
|
||||
"listxattr",
|
||||
"llistxattr",
|
||||
"_llseek",
|
||||
"lremovexattr",
|
||||
"lseek",
|
||||
"lsetxattr",
|
||||
"lstat",
|
||||
"lstat64",
|
||||
"madvise",
|
||||
"memfd_create",
|
||||
"mincore",
|
||||
"mkdir",
|
||||
"mkdirat",
|
||||
"mknod",
|
||||
"mknodat",
|
||||
"mlock",
|
||||
"mlock2",
|
||||
"mlockall",
|
||||
"mmap",
|
||||
"mmap2",
|
||||
"mprotect",
|
||||
"mq_getsetattr",
|
||||
"mq_notify",
|
||||
"mq_open",
|
||||
"mq_timedreceive",
|
||||
"mq_timedsend",
|
||||
"mq_unlink",
|
||||
"mremap",
|
||||
"msgctl",
|
||||
"msgget",
|
||||
"msgrcv",
|
||||
"msgsnd",
|
||||
"msync",
|
||||
"munlock",
|
||||
"munlockall",
|
||||
"munmap",
|
||||
"nanosleep",
|
||||
"newfstatat",
|
||||
"_newselect",
|
||||
"open",
|
||||
"openat",
|
||||
"pause",
|
||||
"pipe",
|
||||
"pipe2",
|
||||
"poll",
|
||||
"ppoll",
|
||||
"prctl",
|
||||
"pread64",
|
||||
"preadv",
|
||||
"prlimit64",
|
||||
"pselect6",
|
||||
"pwrite64",
|
||||
"pwritev",
|
||||
"read",
|
||||
"readahead",
|
||||
"readlink",
|
||||
"readlinkat",
|
||||
"readv",
|
||||
"recv",
|
||||
"recvfrom",
|
||||
"recvmmsg",
|
||||
"recvmsg",
|
||||
"remap_file_pages",
|
||||
"removexattr",
|
||||
"rename",
|
||||
"renameat",
|
||||
"renameat2",
|
||||
"restart_syscall",
|
||||
"rmdir",
|
||||
"rt_sigaction",
|
||||
"rt_sigpending",
|
||||
"rt_sigprocmask",
|
||||
"rt_sigqueueinfo",
|
||||
"rt_sigreturn",
|
||||
"rt_sigsuspend",
|
||||
"rt_sigtimedwait",
|
||||
"rt_tgsigqueueinfo",
|
||||
"sched_getaffinity",
|
||||
"sched_getattr",
|
||||
"sched_getparam",
|
||||
"sched_get_priority_max",
|
||||
"sched_get_priority_min",
|
||||
"sched_getscheduler",
|
||||
"sched_rr_get_interval",
|
||||
"sched_setaffinity",
|
||||
"sched_setattr",
|
||||
"sched_setparam",
|
||||
"sched_setscheduler",
|
||||
"sched_yield",
|
||||
"seccomp",
|
||||
"select",
|
||||
"semctl",
|
||||
"semget",
|
||||
"semop",
|
||||
"semtimedop",
|
||||
"send",
|
||||
"sendfile",
|
||||
"sendfile64",
|
||||
"sendmmsg",
|
||||
"sendmsg",
|
||||
"sendto",
|
||||
"setfsgid",
|
||||
"setfsgid32",
|
||||
"setfsuid",
|
||||
"setfsuid32",
|
||||
"setgid",
|
||||
"setgid32",
|
||||
"setgroups",
|
||||
"setgroups32",
|
||||
"setitimer",
|
||||
"setpgid",
|
||||
"setpriority",
|
||||
"setregid",
|
||||
"setregid32",
|
||||
"setresgid",
|
||||
"setresgid32",
|
||||
"setresuid",
|
||||
"setresuid32",
|
||||
"setreuid",
|
||||
"setreuid32",
|
||||
"setrlimit",
|
||||
"set_robust_list",
|
||||
"setsid",
|
||||
"setsockopt",
|
||||
"set_thread_area",
|
||||
"set_tid_address",
|
||||
"setuid",
|
||||
"setuid32",
|
||||
"setxattr",
|
||||
"shmat",
|
||||
"shmctl",
|
||||
"shmdt",
|
||||
"shmget",
|
||||
"shutdown",
|
||||
"sigaltstack",
|
||||
"signalfd",
|
||||
"signalfd4",
|
||||
"sigreturn",
|
||||
"socket",
|
||||
"socketcall",
|
||||
"socketpair",
|
||||
"splice",
|
||||
"stat",
|
||||
"stat64",
|
||||
"statfs",
|
||||
"statfs64",
|
||||
"symlink",
|
||||
"symlinkat",
|
||||
"sync",
|
||||
"sync_file_range",
|
||||
"syncfs",
|
||||
"sysinfo",
|
||||
"syslog",
|
||||
"tee",
|
||||
"tgkill",
|
||||
"time",
|
||||
"timer_create",
|
||||
"timer_delete",
|
||||
"timerfd_create",
|
||||
"timerfd_gettime",
|
||||
"timerfd_settime",
|
||||
"timer_getoverrun",
|
||||
"timer_gettime",
|
||||
"timer_settime",
|
||||
"times",
|
||||
"tkill",
|
||||
"truncate",
|
||||
"truncate64",
|
||||
"ugetrlimit",
|
||||
"umask",
|
||||
"uname",
|
||||
"unlink",
|
||||
"unlinkat",
|
||||
"utime",
|
||||
"utimensat",
|
||||
"utimes",
|
||||
"vfork",
|
||||
"vmsplice",
|
||||
"wait4",
|
||||
"waitid",
|
||||
"waitpid",
|
||||
"write",
|
||||
"writev",
|
||||
},
|
||||
Action: specs.ActAllow,
|
||||
Args: []specs.LinuxSeccompArg{},
|
||||
},
|
||||
{
|
||||
Names: []string{"personality"},
|
||||
Action: specs.ActAllow,
|
||||
Args: []specs.LinuxSeccompArg{
|
||||
{
|
||||
Index: 0,
|
||||
Value: 0x0,
|
||||
Op: specs.OpEqualTo,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Names: []string{"personality"},
|
||||
Action: specs.ActAllow,
|
||||
Args: []specs.LinuxSeccompArg{
|
||||
{
|
||||
Index: 0,
|
||||
Value: 0x0008,
|
||||
Op: specs.OpEqualTo,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Names: []string{"personality"},
|
||||
Action: specs.ActAllow,
|
||||
Args: []specs.LinuxSeccompArg{
|
||||
{
|
||||
Index: 0,
|
||||
Value: 0xffffffff,
|
||||
Op: specs.OpEqualTo,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
s := &specs.LinuxSeccomp{
|
||||
DefaultAction: specs.ActErrno,
|
||||
Architectures: arches(),
|
||||
Syscalls: syscalls,
|
||||
}
|
||||
|
||||
// include by arch
|
||||
switch runtime.GOARCH {
|
||||
case "arm", "arm64":
|
||||
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
|
||||
Names: []string{
|
||||
"arm_fadvise64_64",
|
||||
"arm_sync_file_range",
|
||||
"breakpoint",
|
||||
"cacheflush",
|
||||
"set_tls",
|
||||
},
|
||||
Action: specs.ActAllow,
|
||||
Args: []specs.LinuxSeccompArg{},
|
||||
})
|
||||
case "amd64":
|
||||
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
|
||||
Names: []string{
|
||||
"arch_prctl",
|
||||
},
|
||||
Action: specs.ActAllow,
|
||||
Args: []specs.LinuxSeccompArg{},
|
||||
})
|
||||
fallthrough
|
||||
case "386":
|
||||
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
|
||||
Names: []string{
|
||||
"modify_ldt",
|
||||
},
|
||||
Action: specs.ActAllow,
|
||||
Args: []specs.LinuxSeccompArg{},
|
||||
})
|
||||
case "s390", "s390x":
|
||||
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
|
||||
Names: []string{
|
||||
"s390_pci_mmio_read",
|
||||
"s390_pci_mmio_write",
|
||||
"s390_runtime_instr",
|
||||
},
|
||||
Action: specs.ActAllow,
|
||||
Args: []specs.LinuxSeccompArg{},
|
||||
})
|
||||
}
|
||||
|
||||
// make a map of enabled capabilities
|
||||
caps := make(map[string]bool)
|
||||
for _, c := range sp.Process.Capabilities.Bounding {
|
||||
caps[c] = true
|
||||
}
|
||||
for _, c := range sp.Process.Capabilities.Effective {
|
||||
caps[c] = true
|
||||
}
|
||||
for _, c := range sp.Process.Capabilities.Inheritable {
|
||||
caps[c] = true
|
||||
}
|
||||
for _, c := range sp.Process.Capabilities.Permitted {
|
||||
caps[c] = true
|
||||
}
|
||||
for _, c := range sp.Process.Capabilities.Ambient {
|
||||
caps[c] = true
|
||||
}
|
||||
|
||||
for c := range caps {
|
||||
switch c {
|
||||
case "CAP_DAC_READ_SEARCH":
|
||||
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
|
||||
Names: []string{"open_by_handle_at"},
|
||||
Action: specs.ActAllow,
|
||||
Args: []specs.LinuxSeccompArg{},
|
||||
})
|
||||
case "CAP_SYS_ADMIN":
|
||||
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
|
||||
Names: []string{
|
||||
"bpf",
|
||||
"clone",
|
||||
"fanotify_init",
|
||||
"lookup_dcookie",
|
||||
"mount",
|
||||
"name_to_handle_at",
|
||||
"perf_event_open",
|
||||
"setdomainname",
|
||||
"sethostname",
|
||||
"setns",
|
||||
"umount",
|
||||
"umount2",
|
||||
"unshare",
|
||||
},
|
||||
Action: specs.ActAllow,
|
||||
Args: []specs.LinuxSeccompArg{},
|
||||
})
|
||||
case "CAP_SYS_BOOT":
|
||||
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
|
||||
Names: []string{"reboot"},
|
||||
Action: specs.ActAllow,
|
||||
Args: []specs.LinuxSeccompArg{},
|
||||
})
|
||||
case "CAP_SYS_CHROOT":
|
||||
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
|
||||
Names: []string{"chroot"},
|
||||
Action: specs.ActAllow,
|
||||
Args: []specs.LinuxSeccompArg{},
|
||||
})
|
||||
case "CAP_SYS_MODULE":
|
||||
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
|
||||
Names: []string{
|
||||
"delete_module",
|
||||
"init_module",
|
||||
"finit_module",
|
||||
"query_module",
|
||||
},
|
||||
Action: specs.ActAllow,
|
||||
Args: []specs.LinuxSeccompArg{},
|
||||
})
|
||||
case "CAP_SYS_PACCT":
|
||||
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
|
||||
Names: []string{"acct"},
|
||||
Action: specs.ActAllow,
|
||||
Args: []specs.LinuxSeccompArg{},
|
||||
})
|
||||
case "CAP_SYS_PTRACE":
|
||||
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
|
||||
Names: []string{
|
||||
"kcmp",
|
||||
"process_vm_readv",
|
||||
"process_vm_writev",
|
||||
"ptrace",
|
||||
},
|
||||
Action: specs.ActAllow,
|
||||
Args: []specs.LinuxSeccompArg{},
|
||||
})
|
||||
case "CAP_SYS_RAWIO":
|
||||
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
|
||||
Names: []string{
|
||||
"iopl",
|
||||
"ioperm",
|
||||
},
|
||||
Action: specs.ActAllow,
|
||||
Args: []specs.LinuxSeccompArg{},
|
||||
})
|
||||
case "CAP_SYS_TIME":
|
||||
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
|
||||
Names: []string{
|
||||
"settimeofday",
|
||||
"stime",
|
||||
"adjtimex",
|
||||
},
|
||||
Action: specs.ActAllow,
|
||||
Args: []specs.LinuxSeccompArg{},
|
||||
})
|
||||
case "CAP_SYS_TTY_CONFIG":
|
||||
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
|
||||
Names: []string{"vhangup"},
|
||||
Action: specs.ActAllow,
|
||||
Args: []specs.LinuxSeccompArg{},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if !caps["CAP_SYS_ADMIN"] {
|
||||
switch runtime.GOARCH {
|
||||
case "s390", "s390x":
|
||||
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
|
||||
Names: []string{
|
||||
"clone",
|
||||
},
|
||||
Action: specs.ActAllow,
|
||||
Args: []specs.LinuxSeccompArg{
|
||||
{
|
||||
Index: 1,
|
||||
Value: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET,
|
||||
ValueTwo: 0,
|
||||
Op: specs.OpMaskedEqual,
|
||||
},
|
||||
},
|
||||
})
|
||||
default:
|
||||
s.Syscalls = append(s.Syscalls, specs.LinuxSyscall{
|
||||
Names: []string{
|
||||
"clone",
|
||||
},
|
||||
Action: specs.ActAllow,
|
||||
Args: []specs.LinuxSeccompArg{
|
||||
{
|
||||
Index: 0,
|
||||
Value: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET,
|
||||
ValueTwo: 0,
|
||||
Op: specs.OpMaskedEqual,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
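A hedged sketch of how a profile produced here is typically consumed. The generator's name and signature are not visible in this hunk, so DefaultProfile below is an assumption; only the *specs.LinuxSeccomp return value is certain from the code above.

// Sketch only: DefaultProfile is an assumed name for the generator above.
func withDefaultSeccomp(sp *specs.Spec) {
	if sp.Linux == nil {
		sp.Linux = &specs.Linux{}
	}
	sp.Linux.Seccomp = DefaultProfile(sp) // attach the generated allowlist to the OCI spec
}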
117 vendor/github.com/containerd/containerd/daemon_test.go generated vendored Normal file
@@ -0,0 +1,117 @@
package containerd

import (
	"context"
	"io"
	"os/exec"
	"sync"
	"syscall"

	"github.com/pkg/errors"
)

type daemon struct {
	sync.Mutex
	addr string
	cmd  *exec.Cmd
}

func (d *daemon) start(name, address string, args []string, stdout, stderr io.Writer) error {
	d.Lock()
	defer d.Unlock()
	if d.cmd != nil {
		return errors.New("daemon is already running")
	}
	args = append(args, []string{"--address", address}...)
	cmd := exec.Command(name, args...)
	cmd.Stdout = stdout
	cmd.Stderr = stderr
	if err := cmd.Start(); err != nil {
		cmd.Wait()
		return errors.Wrap(err, "failed to start daemon")
	}
	d.addr = address
	d.cmd = cmd
	return nil
}

func (d *daemon) waitForStart(ctx context.Context) (*Client, error) {
	var (
		client  *Client
		serving bool
		err     error
	)

	client, err = New(address)
	if err != nil {
		return nil, err
	}
	serving, err = client.IsServing(ctx)
	if !serving {
		client.Close()
		if err == nil {
			err = errors.New("connection was successful but service is not available")
		}
		return nil, err
	}
	return client, err
}

func (d *daemon) Stop() error {
	d.Lock()
	defer d.Unlock()
	if d.cmd == nil {
		return errors.New("daemon is not running")
	}
	return d.cmd.Process.Signal(syscall.SIGTERM)
}

func (d *daemon) Kill() error {
	d.Lock()
	defer d.Unlock()
	if d.cmd == nil {
		return errors.New("daemon is not running")
	}
	return d.cmd.Process.Kill()
}

func (d *daemon) Wait() error {
	d.Lock()
	defer d.Unlock()
	if d.cmd == nil {
		return errors.New("daemon is not running")
	}
	err := d.cmd.Wait()
	d.cmd = nil
	return err
}

func (d *daemon) Restart(stopCb func()) error {
	d.Lock()
	defer d.Unlock()
	if d.cmd == nil {
		return errors.New("daemon is not running")
	}

	var err error
	if err = d.cmd.Process.Signal(syscall.SIGTERM); err != nil {
		return errors.Wrap(err, "failed to signal daemon")
	}

	d.cmd.Wait()

	if stopCb != nil {
		stopCb()
	}

	cmd := exec.Command(d.cmd.Path, d.cmd.Args[1:]...)
	cmd.Stdout = d.cmd.Stdout
	cmd.Stderr = d.cmd.Stderr
	if err := cmd.Start(); err != nil {
		cmd.Wait()
		return errors.Wrap(err, "failed to start new daemon instance")
	}
	d.cmd = cmd

	return nil
}
83 vendor/github.com/containerd/containerd/diff.go generated vendored Normal file
@@ -0,0 +1,83 @@
package containerd

import (
	diffapi "github.com/containerd/containerd/api/services/diff/v1"
	"github.com/containerd/containerd/api/types"
	"github.com/containerd/containerd/diff"
	"github.com/containerd/containerd/mount"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"golang.org/x/net/context"
)

// NewDiffServiceFromClient returns a new diff service which communicates
// over a GRPC connection.
func NewDiffServiceFromClient(client diffapi.DiffClient) diff.Differ {
	return &diffRemote{
		client: client,
	}
}

type diffRemote struct {
	client diffapi.DiffClient
}

func (r *diffRemote) Apply(ctx context.Context, diff ocispec.Descriptor, mounts []mount.Mount) (ocispec.Descriptor, error) {
	req := &diffapi.ApplyRequest{
		Diff:   fromDescriptor(diff),
		Mounts: fromMounts(mounts),
	}
	resp, err := r.client.Apply(ctx, req)
	if err != nil {
		return ocispec.Descriptor{}, err
	}
	return toDescriptor(resp.Applied), nil
}

func (r *diffRemote) DiffMounts(ctx context.Context, a, b []mount.Mount, opts ...diff.Opt) (ocispec.Descriptor, error) {
	var config diff.Config
	for _, opt := range opts {
		if err := opt(&config); err != nil {
			return ocispec.Descriptor{}, err
		}
	}
	req := &diffapi.DiffRequest{
		Left:      fromMounts(a),
		Right:     fromMounts(b),
		MediaType: config.MediaType,
		Ref:       config.Reference,
		Labels:    config.Labels,
	}
	resp, err := r.client.Diff(ctx, req)
	if err != nil {
		return ocispec.Descriptor{}, err
	}
	return toDescriptor(resp.Diff), nil
}

func toDescriptor(d *types.Descriptor) ocispec.Descriptor {
	return ocispec.Descriptor{
		MediaType: d.MediaType,
		Digest:    d.Digest,
		Size:      d.Size_,
	}
}

func fromDescriptor(d ocispec.Descriptor) *types.Descriptor {
	return &types.Descriptor{
		MediaType: d.MediaType,
		Digest:    d.Digest,
		Size_:     d.Size,
	}
}

func fromMounts(mounts []mount.Mount) []*types.Mount {
	apiMounts := make([]*types.Mount, len(mounts))
	for i, m := range mounts {
		apiMounts[i] = &types.Mount{
			Type:    m.Type,
			Source:  m.Source,
			Options: m.Options,
		}
	}
	return apiMounts
}
62 vendor/github.com/containerd/containerd/errdefs/errors.go generated vendored Normal file
@@ -0,0 +1,62 @@
// Package errdefs defines the common errors used throughout containerd
// packages.
//
// Use with errors.Wrap and errors.Wrapf to add context to an error.
//
// To detect an error class, use the IsXXX functions to tell whether an error
// is of a certain type.
//
// The functions ToGRPC and FromGRPC can be used to map server-side and
// client-side errors to the correct types.
package errdefs

import "github.com/pkg/errors"

// Definitions of common error types used throughout containerd. All containerd
// errors returned by most packages will map into one of these error classes.
// Packages should return errors of these types when they want to instruct a
// client to take a particular action.
//
// For the most part, we just try to provide local grpc errors. Most conditions
// map very well to those defined by grpc.
var (
	ErrUnknown            = errors.New("unknown") // used internally to represent a missed mapping.
	ErrInvalidArgument    = errors.New("invalid argument")
	ErrNotFound           = errors.New("not found")
	ErrAlreadyExists      = errors.New("already exists")
	ErrFailedPrecondition = errors.New("failed precondition")
	ErrUnavailable        = errors.New("unavailable")
	ErrNotImplemented     = errors.New("not implemented") // represents not supported and unimplemented
)

// IsInvalidArgument returns true if the error is due to an invalid argument
func IsInvalidArgument(err error) bool {
	return errors.Cause(err) == ErrInvalidArgument
}

// IsNotFound returns true if the error is due to a missing object
func IsNotFound(err error) bool {
	return errors.Cause(err) == ErrNotFound
}

// IsAlreadyExists returns true if the error is due to an already existing
// metadata item
func IsAlreadyExists(err error) bool {
	return errors.Cause(err) == ErrAlreadyExists
}

// IsFailedPrecondition returns true if an operation could not proceed due to
// the lack of a particular condition
func IsFailedPrecondition(err error) bool {
	return errors.Cause(err) == ErrFailedPrecondition
}

// IsUnavailable returns true if the error is due to a resource being unavailable
func IsUnavailable(err error) bool {
	return errors.Cause(err) == ErrUnavailable
}

// IsNotImplemented returns true if the error is due to not being implemented
func IsNotImplemented(err error) bool {
	return errors.Cause(err) == ErrNotImplemented
}
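As the package comment above describes, callers wrap these sentinels for context and later detect the class with the IsXXX helpers; errors.Cause unwinds the wrapping. A minimal sketch, with store and name as hypothetical placeholders that are not part of this diff:

// Sketch only: store and name are hypothetical placeholders.
img, err := store.Get(ctx, name)
if err != nil {
	if errdefs.IsNotFound(err) {
		// Wrapping with errors.Wrapf keeps IsNotFound true for callers upstream.
		return errors.Wrapf(err, "image %q", name)
	}
	return err
}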
122 vendor/github.com/containerd/containerd/errdefs/grpc.go generated vendored Normal file
@@ -0,0 +1,122 @@
package errdefs

import (
	"strings"

	"github.com/pkg/errors"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// ToGRPC will attempt to map the backend containerd error into a grpc error,
// using the original error message as a description.
//
// Further information may be extracted from certain errors depending on their
// type.
//
// If the error is unmapped, the original error will be returned to be handled
// by the regular grpc error handling stack.
func ToGRPC(err error) error {
	if err == nil {
		return nil
	}

	if isGRPCError(err) {
		// error has already been mapped to grpc
		return err
	}

	switch {
	case IsInvalidArgument(err):
		return status.Errorf(codes.InvalidArgument, err.Error())
	case IsNotFound(err):
		return status.Errorf(codes.NotFound, err.Error())
	case IsAlreadyExists(err):
		return status.Errorf(codes.AlreadyExists, err.Error())
	case IsFailedPrecondition(err):
		return status.Errorf(codes.FailedPrecondition, err.Error())
	case IsUnavailable(err):
		return status.Errorf(codes.Unavailable, err.Error())
	case IsNotImplemented(err):
		return status.Errorf(codes.Unimplemented, err.Error())
	}

	return err
}

// ToGRPCf maps the error to grpc error codes, assembling the formatting string
// and combining it with the target error string.
//
// This is equivalent to errdefs.ToGRPC(errors.Wrapf(err, format, args...))
func ToGRPCf(err error, format string, args ...interface{}) error {
	return ToGRPC(errors.Wrapf(err, format, args...))
}

// FromGRPC returns the underlying error from a grpc service based on the grpc error code
func FromGRPC(err error) error {
	if err == nil {
		return nil
	}

	var cls error // divide these into error classes, becomes the cause

	switch code(err) {
	case codes.InvalidArgument:
		cls = ErrInvalidArgument
	case codes.AlreadyExists:
		cls = ErrAlreadyExists
	case codes.NotFound:
		cls = ErrNotFound
	case codes.Unavailable:
		cls = ErrUnavailable
	case codes.FailedPrecondition:
		cls = ErrFailedPrecondition
	case codes.Unimplemented:
		cls = ErrNotImplemented
	default:
		cls = ErrUnknown
	}

	msg := rebaseMessage(cls, err)
	if msg != "" {
		err = errors.Wrapf(cls, msg)
	} else {
		err = errors.WithStack(cls)
	}

	return err
}

// rebaseMessage removes the repeats for an error at the end of an error
// string. This will happen when taking an error over grpc then remapping it.
//
// Effectively, we just remove the string of cls from the end of err if it
// appears there.
func rebaseMessage(cls error, err error) string {
	desc := errDesc(err)
	clss := cls.Error()
	if desc == clss {
		return ""
	}

	return strings.TrimSuffix(desc, ": "+clss)
}

func isGRPCError(err error) bool {
	_, ok := status.FromError(err)
	return ok
}

func code(err error) codes.Code {
	if s, ok := status.FromError(err); ok {
		return s.Code()
	}
	return codes.Unknown
}

func errDesc(err error) string {
	if s, ok := status.FromError(err); ok {
		return s.Message()
	}
	return err.Error()
}
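The test that follows exercises the ToGRPC/FromGRPC round trip in a single process; in real use the two calls sit on opposite ends of the wire. A hedged sketch of that split, in which service, serviceClient, GetRequest, and GetResponse are placeholder types rather than anything from this diff:

// Server side (sketch): classify the error before it crosses the wire.
func (s *service) Get(ctx context.Context, req *GetRequest) (*GetResponse, error) {
	item, err := s.store.Get(ctx, req.Name) // hypothetical backing store
	if err != nil {
		return nil, errdefs.ToGRPC(err) // e.g. ErrNotFound becomes codes.NotFound
	}
	return &GetResponse{Item: item}, nil
}

// Client side (sketch): recover the class so errdefs.IsNotFound matches again.
func get(ctx context.Context, c serviceClient, name string) (*GetResponse, error) {
	resp, err := c.Get(ctx, &GetRequest{Name: name})
	if err != nil {
		return nil, errdefs.FromGRPC(err)
	}
	return resp, nil
}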
65 vendor/github.com/containerd/containerd/errdefs/grpc_test.go generated vendored Normal file
@@ -0,0 +1,65 @@
package errdefs

import (
	"testing"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	"github.com/pkg/errors"
)

func TestGRPCRoundTrip(t *testing.T) {
	errShouldLeaveAlone := errors.New("unknown to package")

	for _, testcase := range []struct {
		input error
		cause error
		str   string
	}{
		{
			input: ErrAlreadyExists,
			cause: ErrAlreadyExists,
		},
		{
			input: ErrNotFound,
			cause: ErrNotFound,
		},
		{
			input: errors.Wrapf(ErrFailedPrecondition, "test test test"),
			cause: ErrFailedPrecondition,
			str:   "test test test: failed precondition",
		},
		{
			input: status.Errorf(codes.Unavailable, "should be not available"),
			cause: ErrUnavailable,
			str:   "should be not available: unavailable",
		},
		{
			input: errShouldLeaveAlone,
			cause: ErrUnknown,
			str:   errShouldLeaveAlone.Error() + ": " + ErrUnknown.Error(),
		},
	} {
		t.Run(testcase.input.Error(), func(t *testing.T) {
			t.Logf("input: %v", testcase.input)
			gerr := ToGRPC(testcase.input)
			t.Logf("grpc: %v", gerr)
			ferr := FromGRPC(gerr)
			t.Logf("recovered: %v", ferr)

			if errors.Cause(ferr) != testcase.cause {
				t.Fatalf("unexpected cause: %v != %v", errors.Cause(ferr), testcase.cause)
			}

			expected := testcase.str
			if expected == "" {
				expected = testcase.cause.Error()
			}
			if ferr.Error() != expected {
				t.Fatalf("unexpected string: %q != %q", ferr.Error(), expected)
			}
		})
	}
}
65 vendor/github.com/containerd/containerd/export_test.go generated vendored Normal file
@@ -0,0 +1,65 @@
package containerd

import (
	"archive/tar"
	"io"
	"runtime"
	"testing"

	"github.com/containerd/containerd/images/oci"
)

// TestOCIExport exports testImage as a tar stream
func TestOCIExport(t *testing.T) {
	// TODO: support windows
	if testing.Short() || runtime.GOOS == "windows" {
		t.Skip()
	}
	ctx, cancel := testContext()
	defer cancel()

	client, err := New(address)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	pulled, err := client.Pull(ctx, testImage)
	if err != nil {
		t.Fatal(err)
	}
	exportedStream, err := client.Export(ctx, &oci.V1Exporter{}, pulled.Target())
	if err != nil {
		t.Fatal(err)
	}
	assertOCITar(t, exportedStream)
}

func assertOCITar(t *testing.T, r io.Reader) {
	// TODO: add more assertions
	tr := tar.NewReader(r)
	foundOCILayout := false
	foundIndexJSON := false
	for {
		h, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			t.Error(err)
			continue
		}
		if h.Name == "oci-layout" {
			foundOCILayout = true
		}
		if h.Name == "index.json" {
			foundIndexJSON = true
		}
	}
	if !foundOCILayout {
		t.Error("oci-layout not found")
	}
	if !foundIndexJSON {
		t.Error("index.json not found")
	}
}
@@ -5,6 +5,7 @@ import (
 	"os"
 	"syscall"

+	"github.com/containerd/containerd/sys"
 	"github.com/containerd/continuity/sysx"
 	"github.com/pkg/errors"
 	"golang.org/x/sys/unix"
@@ -13,20 +14,7 @@ import (
 func copyFileInfo(fi os.FileInfo, name string) error {
 	st := fi.Sys().(*syscall.Stat_t)
 	if err := os.Lchown(name, int(st.Uid), int(st.Gid)); err != nil {
-		if os.IsPermission(err) {
-			// Normally if uid/gid are the same this would be a no-op, but some
-			// filesystems may still return EPERM... for instance NFS does this.
-			// In such a case, this is not an error.
-			if dstStat, err2 := os.Lstat(name); err2 == nil {
-				st2 := dstStat.Sys().(*syscall.Stat_t)
-				if st.Uid == st2.Uid && st.Gid == st2.Gid {
-					err = nil
-				}
-			}
-		}
-		if err != nil {
-			return errors.Wrapf(err, "failed to chown %s", name)
-		}
+		return errors.Wrapf(err, "failed to chown %s", name)
 	}

 	if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink {
@@ -35,7 +23,7 @@ func copyFileInfo(fi os.FileInfo, name string) error {
 		}
 	}

-	timespec := []unix.Timespec{unix.Timespec(StatAtime(st)), unix.Timespec(StatMtime(st))}
+	timespec := []unix.Timespec{unix.Timespec(sys.StatAtime(st)), unix.Timespec(sys.StatMtime(st))}
 	if err := unix.UtimesNanoAt(unix.AT_FDCWD, name, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil {
 		return errors.Wrapf(err, "failed to utime %s", name)
 	}
@@ -7,7 +7,7 @@ import (

 	_ "crypto/sha256"

-	"github.com/containerd/continuity/fs/fstest"
+	"github.com/containerd/containerd/fs/fstest"
 	"github.com/pkg/errors"
 )
@@ -7,6 +7,7 @@ import (
 	"os"
 	"syscall"

+	"github.com/containerd/containerd/sys"
 	"github.com/containerd/continuity/sysx"
 	"github.com/pkg/errors"
 	"golang.org/x/sys/unix"
@@ -15,20 +16,7 @@ import (
 func copyFileInfo(fi os.FileInfo, name string) error {
 	st := fi.Sys().(*syscall.Stat_t)
 	if err := os.Lchown(name, int(st.Uid), int(st.Gid)); err != nil {
-		if os.IsPermission(err) {
-			// Normally if uid/gid are the same this would be a no-op, but some
-			// filesystems may still return EPERM... for instance NFS does this.
-			// In such a case, this is not an error.
-			if dstStat, err2 := os.Lstat(name); err2 == nil {
-				st2 := dstStat.Sys().(*syscall.Stat_t)
-				if st.Uid == st2.Uid && st.Gid == st2.Gid {
-					err = nil
-				}
-			}
-		}
-		if err != nil {
-			return errors.Wrapf(err, "failed to chown %s", name)
-		}
+		return errors.Wrapf(err, "failed to chown %s", name)
 	}

 	if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink {
@@ -37,7 +25,7 @@ func copyFileInfo(fi os.FileInfo, name string) error {
 		}
 	}

-	timespec := []syscall.Timespec{StatAtime(st), StatMtime(st)}
+	timespec := []syscall.Timespec{sys.StatAtime(st), sys.StatMtime(st)}
 	if err := syscall.UtimesNano(name, timespec); err != nil {
 		return errors.Wrapf(err, "failed to utime %s", name)
 	}
@@ -11,7 +11,7 @@ import (
 	"testing"
 	"time"

-	"github.com/containerd/continuity/fs/fstest"
+	"github.com/containerd/containerd/fs/fstest"
 	"github.com/pkg/errors"
 )
@@ -165,7 +165,6 @@ func TestParentDirectoryPermission(t *testing.T) {
 		t.Fatalf("Failed diff with base: %+v", err)
 	}
 }

 func TestUpdateWithSameTime(t *testing.T) {
-	skipDiffTestOnWindows(t)
 	tt := time.Now().Truncate(time.Second)
@@ -179,36 +178,27 @@ func TestUpdateWithSameTime(t *testing.T) {
 		fstest.CreateFile("/file-same-time", []byte("1"), 0644),
 		fstest.Chtimes("/file-same-time", t1, t1),
 		fstest.CreateFile("/file-truncated-time-1", []byte("1"), 0644),
-		fstest.Chtimes("/file-truncated-time-1", tt, tt),
+		fstest.Chtimes("/file-truncated-time-1", t1, t1),
 		fstest.CreateFile("/file-truncated-time-2", []byte("1"), 0644),
 		fstest.Chtimes("/file-truncated-time-2", tt, tt),
+		fstest.CreateFile("/file-truncated-time-3", []byte("1"), 0644),
+		fstest.Chtimes("/file-truncated-time-3", t1, t1),
 	)
 	l2 := fstest.Apply(
 		fstest.CreateFile("/file-modified-time", []byte("2"), 0644),
 		fstest.Chtimes("/file-modified-time", t2, t2),
 		fstest.CreateFile("/file-no-change", []byte("1"), 0644),
-		fstest.Chtimes("/file-no-change", t1, t1),
+		fstest.Chtimes("/file-no-change", tt, tt), // use truncated time, should be regarded as no change
 		fstest.CreateFile("/file-same-time", []byte("2"), 0644),
 		fstest.Chtimes("/file-same-time", t1, t1),
-		fstest.CreateFile("/file-truncated-time-1", []byte("1"), 0644),
-		fstest.Chtimes("/file-truncated-time-1", t1, t1),
+		fstest.CreateFile("/file-truncated-time-1", []byte("2"), 0644),
+		fstest.Chtimes("/file-truncated-time-1", tt, tt),
 		fstest.CreateFile("/file-truncated-time-2", []byte("2"), 0644),
 		fstest.Chtimes("/file-truncated-time-2", tt, tt),
+		fstest.CreateFile("/file-truncated-time-3", []byte("1"), 0644),
+		fstest.Chtimes("/file-truncated-time-3", tt, tt),
 	)
 	diff := []TestChange{
 		// "/file-same-time" excluded because matching non-zero nanosecond values
 		Modify("/file-modified-time"),
+		// Include changes with truncated timestamps. Comparing newly
+		// extracted tars which have truncated timestamps will be
+		// expected to produce changes. The expectation is that diff
+		// archives are generated once and kept, newly generated diffs
+		// will not consider cases where only one side is truncated.
+		Modify("/file-truncated-time-1"),
 		Modify("/file-truncated-time-2"),
+		Modify("/file-truncated-time-3"),
 	}

 	if err := testDiffWithBase(l1, l2, diff); err != nil {
@@ -8,7 +8,8 @@ import (
 	"os/exec"
 	"testing"

-	"github.com/containerd/continuity/testutil"
+	"github.com/containerd/containerd/testutil"
+	"github.com/stretchr/testify/assert"
 )

 func testSupportsDType(t *testing.T, expected bool, mkfs ...string) {
@@ -41,9 +42,7 @@ func testSupportsDType(t *testing.T, expected bool, mkfs ...string) {
 		t.Fatal(err)
 	}
 	t.Logf("Supports d_type: %v", result)
-	if expected != result {
-		t.Fatalf("expected %+v, got: %+v", expected, result)
-	}
+	assert.Equal(t, expected, result)
 }

 func TestSupportsDTypeWithFType0XFS(t *testing.T) {
@@ -3,7 +3,7 @@ package fs
 import (
 	"testing"

-	"github.com/containerd/continuity/testutil"
+	"github.com/containerd/containerd/testutil"
 )

 func TestRequiresRootNOP(t *testing.T) {
@@ -17,7 +17,7 @@ func TestRequiresRootNOP(t *testing.T) {
 	// that file is only built on linux. Since the Makefile is not
 	// go build tag aware it sees this file and then tries to run
 	// the following command on all platforms: "go test ...
-	// github.com/containerd/continuity/fs -test.root". On
+	// github.com/containerd/containerd/fs -test.root". On
 	// non-linux platforms this fails because there are no tests in
 	// the "fs" package that reference testutil.RequiresRoot. To
 	// fix this problem we'll add a reference to this symbol below.
@@ -15,15 +15,6 @@ type inode struct {
 	dev, ino uint64
 }

-func newInode(stat *syscall.Stat_t) inode {
-	return inode{
-		// Dev is uint32 on darwin/bsd, uint64 on linux/solaris
-		dev: uint64(stat.Dev), // nolint: unconvert
-		// Ino is uint32 on bsd, uint64 on darwin/linux/solaris
-		ino: uint64(stat.Ino), // nolint: unconvert
-	}
-}
-
 func diskUsage(roots ...string) (Usage, error) {

 	var (
@@ -37,7 +28,9 @@ func diskUsage(roots ...string) (Usage, error) {
 			return err
 		}

-		inoKey := newInode(fi.Sys().(*syscall.Stat_t))
+		stat := fi.Sys().(*syscall.Stat_t)
+
+		inoKey := inode{dev: uint64(stat.Dev), ino: uint64(stat.Ino)}
 		if _, ok := inodes[inoKey]; !ok {
 			inodes[inoKey] = struct{}{}
 			size += fi.Size()
@@ -67,7 +60,9 @@ func diffUsage(ctx context.Context, a, b string) (Usage, error) {
 		}

 		if kind == ChangeKindAdd || kind == ChangeKindModify {
-			inoKey := newInode(fi.Sys().(*syscall.Stat_t))
+			stat := fi.Sys().(*syscall.Stat_t)
+
+			inoKey := inode{dev: uint64(stat.Dev), ino: uint64(stat.Ino)}
 			if _, ok := inodes[inoKey]; !ok {
 				inodes[inoKey] = struct{}{}
 				size += fi.Size()
@@ -13,6 +13,5 @@ func getLinkInfo(fi os.FileInfo) (uint64, bool) {
 		return 0, false
 	}

-	// Ino is uint32 on bsd, uint64 on darwin/linux/solaris
-	return uint64(s.Ino), !fi.IsDir() && s.Nlink > 1 // nolint: unconvert
+	return uint64(s.Ino), !fi.IsDir() && s.Nlink > 1
 }
@@ -71,9 +71,9 @@ func sameFile(f1, f2 *currentPath) (bool, error) {
 		return false, nil
 	}

-	// If the timestamp may have been truncated in both of the
+	// If the timestamp may have been truncated in one of the
 	// files, check content of file to determine difference
-	if t1.Nanosecond() == 0 && t2.Nanosecond() == 0 {
+	if t1.Nanosecond() == 0 || t2.Nanosecond() == 0 {
 		var eq bool
 		if (f1.f.Mode() & os.ModeSymlink) == os.ModeSymlink {
 			eq, err = compareSymlinkTarget(f1.fullPath, f2.fullPath)
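The && to || switch is the substance of this hunk: tar extraction truncates mtimes to whole seconds, so typically only one side of a comparison carries nanoseconds. An illustrative sketch (values invented, not from the diff) of why the old condition never reached the content comparison:

// Illustrative only: an extracted copy vs. the original file's mtime.
t1 := time.Date(2018, 1, 2, 3, 4, 5, 0, time.UTC)         // truncated by tar
t2 := time.Date(2018, 1, 2, 3, 4, 5, 123456789, time.UTC) // original mtime
before := t1.Nanosecond() == 0 && t2.Nanosecond() == 0 // false: content never checked, files wrongly reported as different
fixed := t1.Nanosecond() == 0 || t2.Nanosecond() == 0  // true: falls through to the content compare
_, _ = before, fixed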
@@ -8,7 +8,7 @@ import (
 	"path/filepath"
 	"testing"

-	"github.com/containerd/continuity/fs/fstest"
+	"github.com/containerd/containerd/fs/fstest"
 	"github.com/pkg/errors"
 )
35 vendor/github.com/containerd/containerd/grpc.go generated vendored Normal file
@@ -0,0 +1,35 @@
package containerd

import (
	"github.com/containerd/containerd/namespaces"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

type namespaceInterceptor struct {
	namespace string
}

func (ni namespaceInterceptor) unary(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	_, ok := namespaces.Namespace(ctx)
	if !ok {
		ctx = namespaces.WithNamespace(ctx, ni.namespace)
	}
	return invoker(ctx, method, req, reply, cc, opts...)
}

func (ni namespaceInterceptor) stream(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
	_, ok := namespaces.Namespace(ctx)
	if !ok {
		ctx = namespaces.WithNamespace(ctx, ni.namespace)
	}

	return streamer(ctx, desc, cc, method, opts...)
}

func newNSInterceptors(ns string) (grpc.UnaryClientInterceptor, grpc.StreamClientInterceptor) {
	ni := namespaceInterceptor{
		namespace: ns,
	}
	return grpc.UnaryClientInterceptor(ni.unary), grpc.StreamClientInterceptor(ni.stream)
}
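These interceptors backfill a default namespace on any outgoing call whose context does not already carry one. A hedged sketch of how the pair might be wired into a client connection; the socket path and namespace value are placeholders:

// Sketch only: address and namespace are placeholders.
unary, stream := newNSInterceptors("default")
conn, err := grpc.Dial("unix:///run/containerd/containerd.sock",
	grpc.WithInsecure(),
	grpc.WithUnaryInterceptor(unary),
	grpc.WithStreamInterceptor(stream),
)
if err != nil {
	return err
}
defer conn.Close()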
46 vendor/github.com/containerd/containerd/helpers_unix_test.go generated vendored Normal file
@@ -0,0 +1,46 @@
// +build !windows

package containerd

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/containers"
	"github.com/containerd/containerd/oci"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

const newLine = "\n"

func withExitStatus(es int) oci.SpecOpts {
	return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
		s.Process.Args = []string{"sh", "-c", fmt.Sprintf("exit %d", es)}
		return nil
	}
}

func withProcessArgs(args ...string) oci.SpecOpts {
	return oci.WithProcessArgs(args...)
}

func withCat() oci.SpecOpts {
	return oci.WithProcessArgs("cat")
}

func withTrue() oci.SpecOpts {
	return oci.WithProcessArgs("true")
}

func withExecExitStatus(s *specs.Process, es int) {
	s.Args = []string{"sh", "-c", fmt.Sprintf("exit %d", es)}
}

func withExecArgs(s *specs.Process, args ...string) {
	s.Args = args
}

var (
	withNewSnapshot = WithNewSnapshot
	withImageConfig = oci.WithImageConfig
)
55 vendor/github.com/containerd/containerd/helpers_windows_test.go generated vendored Normal file
@@ -0,0 +1,55 @@
// +build windows

package containerd

import (
	"context"
	"strconv"

	"github.com/containerd/containerd/containers"
	"github.com/containerd/containerd/oci"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

const newLine = "\r\n"

func withExitStatus(es int) oci.SpecOpts {
	return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
		s.Process.Args = []string{"powershell", "-noprofile", "exit", strconv.Itoa(es)}
		return nil
	}
}

func withProcessArgs(args ...string) oci.SpecOpts {
	return oci.WithProcessArgs(append([]string{"powershell", "-noprofile"}, args...)...)
}

func withCat() oci.SpecOpts {
	return oci.WithProcessArgs("cmd", "/c", "more")
}

func withTrue() oci.SpecOpts {
	return oci.WithProcessArgs("cmd", "/c")
}

func withExecExitStatus(s *specs.Process, es int) {
	s.Args = []string{"powershell", "-noprofile", "exit", strconv.Itoa(es)}
}

func withExecArgs(s *specs.Process, args ...string) {
	s.Args = append([]string{"powershell", "-noprofile"}, args...)
}

func withImageConfig(i Image) oci.SpecOpts {
	return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
		s.Windows.LayerFolders = dockerLayerFolders
		return nil
	}
}

func withNewSnapshot(id string, i Image) NewContainerOpts {
	// TODO: when windows has a snapshotter remove the withNewSnapshot helper
	return func(ctx context.Context, client *Client, c *containers.Container) error {
		return nil
	}
}
174 vendor/github.com/containerd/containerd/image.go generated vendored Normal file
@@ -0,0 +1,174 @@
package containerd

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/platforms"
	"github.com/containerd/containerd/rootfs"
	"github.com/containerd/containerd/snapshots"
	digest "github.com/opencontainers/go-digest"
	"github.com/opencontainers/image-spec/identity"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

// Image describes an image used by containers
type Image interface {
	// Name of the image
	Name() string
	// Target descriptor for the image content
	Target() ocispec.Descriptor
	// Unpack unpacks the image's content into a snapshot
	Unpack(context.Context, string) error
	// RootFS returns the unpacked diffids that make up the image's rootfs.
	RootFS(ctx context.Context) ([]digest.Digest, error)
	// Size returns the total size of the image's packed resources.
	Size(ctx context.Context) (int64, error)
	// Config descriptor for the image.
	Config(ctx context.Context) (ocispec.Descriptor, error)
	// IsUnpacked returns whether or not an image is unpacked.
	IsUnpacked(context.Context, string) (bool, error)
	// ContentStore provides a content store which contains image blob data
	ContentStore() content.Store
}

var _ = (Image)(&image{})

type image struct {
	client *Client

	i images.Image
}

func (i *image) Name() string {
	return i.i.Name
}

func (i *image) Target() ocispec.Descriptor {
	return i.i.Target
}

func (i *image) RootFS(ctx context.Context) ([]digest.Digest, error) {
	provider := i.client.ContentStore()
	return i.i.RootFS(ctx, provider, platforms.Default())
}

func (i *image) Size(ctx context.Context) (int64, error) {
	provider := i.client.ContentStore()
	return i.i.Size(ctx, provider, platforms.Default())
}

func (i *image) Config(ctx context.Context) (ocispec.Descriptor, error) {
	provider := i.client.ContentStore()
	return i.i.Config(ctx, provider, platforms.Default())
}

func (i *image) IsUnpacked(ctx context.Context, snapshotterName string) (bool, error) {
	sn := i.client.SnapshotService(snapshotterName)
	cs := i.client.ContentStore()

	diffs, err := i.i.RootFS(ctx, cs, platforms.Default())
	if err != nil {
		return false, err
	}

	chainID := identity.ChainID(diffs)
	_, err = sn.Stat(ctx, chainID.String())
	if err == nil {
		return true, nil
	} else if !errdefs.IsNotFound(err) {
		return false, err
	}

	return false, nil
}

func (i *image) Unpack(ctx context.Context, snapshotterName string) error {
	ctx, done, err := i.client.WithLease(ctx)
	if err != nil {
		return err
	}
	defer done()

	layers, err := i.getLayers(ctx, platforms.Default())
	if err != nil {
		return err
	}

	var (
		sn = i.client.SnapshotService(snapshotterName)
		a  = i.client.DiffService()
		cs = i.client.ContentStore()

		chain    []digest.Digest
		unpacked bool
	)
	for _, layer := range layers {
		labels := map[string]string{
			"containerd.io/uncompressed": layer.Diff.Digest.String(),
		}

		unpacked, err = rootfs.ApplyLayer(ctx, layer, chain, sn, a, snapshots.WithLabels(labels))
		if err != nil {
			return err
		}

		chain = append(chain, layer.Diff.Digest)
	}

	if unpacked {
		desc, err := i.i.Config(ctx, cs, platforms.Default())
		if err != nil {
			return err
		}

		rootfs := identity.ChainID(chain).String()

		cinfo := content.Info{
			Digest: desc.Digest,
			Labels: map[string]string{
				fmt.Sprintf("containerd.io/gc.ref.snapshot.%s", snapshotterName): rootfs,
			},
		}
		if _, err := cs.Update(ctx, cinfo, fmt.Sprintf("labels.containerd.io/gc.ref.snapshot.%s", snapshotterName)); err != nil {
			return err
		}
	}

	return nil
}

func (i *image) getLayers(ctx context.Context, platform string) ([]rootfs.Layer, error) {
	cs := i.client.ContentStore()

	manifest, err := images.Manifest(ctx, cs, i.i.Target, platform)
	if err != nil {
		return nil, err
	}

	diffIDs, err := i.i.RootFS(ctx, cs, platform)
	if err != nil {
		return nil, errors.Wrap(err, "failed to resolve rootfs")
	}
	if len(diffIDs) != len(manifest.Layers) {
		return nil, errors.Errorf("mismatched image rootfs and manifest layers")
	}
	layers := make([]rootfs.Layer, len(diffIDs))
	for i := range diffIDs {
		layers[i].Diff = ocispec.Descriptor{
			// TODO: derive media type from compressed type
			MediaType: ocispec.MediaTypeImageLayer,
			Digest:    diffIDs[i],
		}
		layers[i].Blob = manifest.Layers[i]
	}
	return layers, nil
}

func (i *image) ContentStore() content.Store {
	return i.client.ContentStore()
}
136 vendor/github.com/containerd/containerd/image_store.go generated vendored Normal file
@@ -0,0 +1,136 @@
package containerd

import (
	"context"

	imagesapi "github.com/containerd/containerd/api/services/images/v1"
	"github.com/containerd/containerd/api/types"
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/images"
	ptypes "github.com/gogo/protobuf/types"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

type remoteImages struct {
	client imagesapi.ImagesClient
}

// NewImageStoreFromClient returns a new image store client
func NewImageStoreFromClient(client imagesapi.ImagesClient) images.Store {
	return &remoteImages{
		client: client,
	}
}

func (s *remoteImages) Get(ctx context.Context, name string) (images.Image, error) {
	resp, err := s.client.Get(ctx, &imagesapi.GetImageRequest{
		Name: name,
	})
	if err != nil {
		return images.Image{}, errdefs.FromGRPC(err)
	}

	return imageFromProto(resp.Image), nil
}

func (s *remoteImages) List(ctx context.Context, filters ...string) ([]images.Image, error) {
	resp, err := s.client.List(ctx, &imagesapi.ListImagesRequest{
		Filters: filters,
	})
	if err != nil {
		return nil, errdefs.FromGRPC(err)
	}

	return imagesFromProto(resp.Images), nil
}

func (s *remoteImages) Create(ctx context.Context, image images.Image) (images.Image, error) {
	created, err := s.client.Create(ctx, &imagesapi.CreateImageRequest{
		Image: imageToProto(&image),
	})
	if err != nil {
		return images.Image{}, errdefs.FromGRPC(err)
	}

	return imageFromProto(&created.Image), nil
}

func (s *remoteImages) Update(ctx context.Context, image images.Image, fieldpaths ...string) (images.Image, error) {
	var updateMask *ptypes.FieldMask
	if len(fieldpaths) > 0 {
		updateMask = &ptypes.FieldMask{
			Paths: fieldpaths,
		}
	}

	updated, err := s.client.Update(ctx, &imagesapi.UpdateImageRequest{
		Image:      imageToProto(&image),
		UpdateMask: updateMask,
	})
	if err != nil {
		return images.Image{}, errdefs.FromGRPC(err)
	}

	return imageFromProto(&updated.Image), nil
}

func (s *remoteImages) Delete(ctx context.Context, name string, opts ...images.DeleteOpt) error {
	var do images.DeleteOptions
	for _, opt := range opts {
		if err := opt(ctx, &do); err != nil {
			return err
		}
	}
	_, err := s.client.Delete(ctx, &imagesapi.DeleteImageRequest{
		Name: name,
		Sync: do.Synchronous,
	})

	return errdefs.FromGRPC(err)
}

func imageToProto(image *images.Image) imagesapi.Image {
	return imagesapi.Image{
		Name:      image.Name,
		Labels:    image.Labels,
		Target:    descToProto(&image.Target),
		CreatedAt: image.CreatedAt,
		UpdatedAt: image.UpdatedAt,
	}
}

func imageFromProto(imagepb *imagesapi.Image) images.Image {
	return images.Image{
		Name:      imagepb.Name,
		Labels:    imagepb.Labels,
		Target:    descFromProto(&imagepb.Target),
		CreatedAt: imagepb.CreatedAt,
		UpdatedAt: imagepb.UpdatedAt,
	}
}

func imagesFromProto(imagespb []imagesapi.Image) []images.Image {
	var images []images.Image

	for _, image := range imagespb {
		images = append(images, imageFromProto(&image))
	}

	return images
}

func descFromProto(desc *types.Descriptor) ocispec.Descriptor {
	return ocispec.Descriptor{
		MediaType: desc.MediaType,
		Size:      desc.Size_,
		Digest:    desc.Digest,
	}
}

func descToProto(desc *ocispec.Descriptor) types.Descriptor {
	return types.Descriptor{
		MediaType: desc.MediaType,
		Size_:     desc.Size,
		Digest:    desc.Digest,
	}
}
58 vendor/github.com/containerd/containerd/image_test.go generated vendored Normal file
@@ -0,0 +1,58 @@
package containerd

import (
	"runtime"
	"testing"

	"github.com/containerd/containerd/errdefs"
)

func TestImageIsUnpacked(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip()
	}

	const imageName = "docker.io/library/busybox:latest"
	ctx, cancel := testContext()
	defer cancel()

	client, err := newClient(t, address)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	// Cleanup
	err = client.ImageService().Delete(ctx, imageName)
	if err != nil && !errdefs.IsNotFound(err) {
		t.Fatal(err)
	}

	// By default pull does not unpack an image
	image, err := client.Pull(ctx, imageName)
	if err != nil {
		t.Fatal(err)
	}

	// Check that image is not unpacked
	unpacked, err := image.IsUnpacked(ctx, DefaultSnapshotter)
	if err != nil {
		t.Fatal(err)
	}
	if unpacked {
		t.Fatalf("image should not be unpacked")
	}

	// Check that image is unpacked
	err = image.Unpack(ctx, DefaultSnapshotter)
	if err != nil {
		t.Fatal(err)
	}
	unpacked, err = image.IsUnpacked(ctx, DefaultSnapshotter)
	if err != nil {
		t.Fatal(err)
	}
	if !unpacked {
		t.Fatalf("image should be unpacked")
	}
}
135 vendor/github.com/containerd/containerd/images/handlers.go generated vendored Normal file
@@ -0,0 +1,135 @@
package images

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/content"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"golang.org/x/sync/errgroup"
)

var (
	// ErrSkipDesc is used to skip processing of a descriptor and
	// its descendants.
	ErrSkipDesc = fmt.Errorf("skip descriptor")

	// ErrStopHandler is used to signify that the descriptor
	// has been handled and should not be handled further.
	// This applies only to a single descriptor in a handler
	// chain and does not apply to descendant descriptors.
	ErrStopHandler = fmt.Errorf("stop handler")
)

// Handler handles image manifests
type Handler interface {
	Handle(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error)
}

// HandlerFunc function implementing the Handler interface
type HandlerFunc func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error)

// Handle image manifests
func (fn HandlerFunc) Handle(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) {
	return fn(ctx, desc)
}

// Handlers returns a handler that will run the handlers in sequence.
//
// A handler may return `ErrStopHandler` to stop calling additional handlers
func Handlers(handlers ...Handler) HandlerFunc {
	return func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) {
		var children []ocispec.Descriptor
		for _, handler := range handlers {
			ch, err := handler.Handle(ctx, desc)
			if err != nil {
				if errors.Cause(err) == ErrStopHandler {
					break
				}
				return nil, err
			}

			children = append(children, ch...)
		}

		return children, nil
	}
}

// Walk the resources of an image and call the handler for each. If the
// handler decodes the sub-resources for each image, they will be walked
// as well.
//
// This differs from dispatch in that each sibling resource is considered
// synchronously.
func Walk(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) error {
	for _, desc := range descs {

		children, err := handler.Handle(ctx, desc)
		if err != nil {
			if errors.Cause(err) == ErrSkipDesc {
				continue // don't traverse the children.
			}
			return err
		}

		if len(children) > 0 {
			if err := Walk(ctx, handler, children...); err != nil {
				return err
			}
		}
	}

	return nil
}

// Dispatch runs the provided handler for content specified by the descriptors.
// If the handler decodes subresources, they will be visited as well.
//
// Handlers for siblings are run in parallel on the provided descriptors. A
// handler may return `ErrSkipDesc` to signal to the dispatcher to not traverse
// any children.
//
// Typically, this function will be used with `FetchHandler`, often composed
// with other handlers.
//
// If any handler returns an error, the dispatch session will be canceled.
func Dispatch(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) error {
	eg, ctx := errgroup.WithContext(ctx)
	for _, desc := range descs {
		desc := desc

		eg.Go(func() error {
			desc := desc

			children, err := handler.Handle(ctx, desc)
			if err != nil {
				if errors.Cause(err) == ErrSkipDesc {
					return nil // don't traverse the children.
				}
				return err
			}

			if len(children) > 0 {
				return Dispatch(ctx, handler, children...)
			}

			return nil
		})
	}

	return eg.Wait()
}

// ChildrenHandler decodes well-known manifest types and returns their children.
//
// This is useful for supporting recursive fetch and other use cases where you
// want to do a full walk of resources.
//
// One can also replace this with another implementation to allow descending of
// arbitrary types.
func ChildrenHandler(provider content.Provider, platform string) HandlerFunc {
	return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		return Children(ctx, provider, desc, platform)
	}
}
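A hedged sketch of how these pieces compose: a visitor HandlerFunc is paired with ChildrenHandler so Dispatch can fan out across an image's manifest tree. Here provider, ctx, and target are assumed to come from the surrounding code, and the printing is illustrative only:

// Sketch only: provider, ctx and target come from the caller.
handler := Handlers(
	HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		fmt.Println(desc.MediaType, desc.Digest) // visit every reachable descriptor
		return nil, nil
	}),
	ChildrenHandler(provider, ""), // an empty platform walks all platforms
)
if err := Dispatch(ctx, handler, target); err != nil {
	// a handler error other than ErrSkipDesc cancels the whole walk
	return err
}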
369
vendor/github.com/containerd/containerd/images/image.go
generated
vendored
Normal file
369
vendor/github.com/containerd/containerd/images/image.go
generated
vendored
Normal file
|
@ -0,0 +1,369 @@
|
|||
package images
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/content"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/log"
|
||||
"github.com/containerd/containerd/platforms"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Image provides the model for how containerd views container images.
|
||||
type Image struct {
|
||||
// Name of the image.
|
||||
//
|
||||
// To be pulled, it must be a reference compatible with resolvers.
|
||||
//
|
||||
// This field is required.
|
||||
Name string
|
||||
|
||||
// Labels provide runtime decoration for the image record.
|
||||
//
|
||||
// There is no default behavior for how these labels are propagated. They
|
||||
// only decorate the static metadata object.
|
||||
//
|
||||
// This field is optional.
|
||||
Labels map[string]string
|
||||
|
||||
// Target describes the root content for this image. Typically, this is
|
||||
// a manifest, index or manifest list.
|
||||
Target ocispec.Descriptor
|
||||
|
||||
CreatedAt, UpdatedAt time.Time
|
||||
}
|
||||
|
||||
// DeleteOptions provide options on image delete
|
||||
type DeleteOptions struct {
|
||||
Synchronous bool
|
||||
}
|
||||
|
||||
// DeleteOpt allows configuring a delete operation
|
||||
type DeleteOpt func(context.Context, *DeleteOptions) error
|
||||
|
||||
// SynchronousDelete is used to indicate that an image deletion and removal of
|
||||
// the image resources should occur synchronously before returning a result.
|
||||
func SynchronousDelete() DeleteOpt {
|
||||
return func(ctx context.Context, o *DeleteOptions) error {
|
||||
o.Synchronous = true
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Store and interact with images
|
||||
type Store interface {
|
||||
Get(ctx context.Context, name string) (Image, error)
|
||||
List(ctx context.Context, filters ...string) ([]Image, error)
|
||||
Create(ctx context.Context, image Image) (Image, error)
|
||||
|
||||
// Update will replace the data in the store with the provided image. If
|
||||
// one or more fieldpaths are provided, only those fields will be updated.
|
||||
Update(ctx context.Context, image Image, fieldpaths ...string) (Image, error)
|
||||
|
||||
Delete(ctx context.Context, name string, opts ...DeleteOpt) error
|
||||
}
|
||||
|
||||
// TODO(stevvooe): Many of these functions make strong platform assumptions,
|
||||
// which are untrue in a lot of cases. More refactoring must be done here to
|
||||
// make this work in all cases.
|
||||
|
||||
// Config resolves the image configuration descriptor.
|
||||
//
|
||||
// The caller can then use the descriptor to resolve and process the
|
||||
// configuration of the image.
|
||||
func (image *Image) Config(ctx context.Context, provider content.Provider, platform string) (ocispec.Descriptor, error) {
|
||||
return Config(ctx, provider, image.Target, platform)
|
||||
}
|
||||
|
||||
// RootFS returns the unpacked diffids that make up and images rootfs.
|
||||
//
|
||||
// These are used to verify that a set of layers unpacked to the expected
|
||||
// values.
|
||||
func (image *Image) RootFS(ctx context.Context, provider content.Provider, platform string) ([]digest.Digest, error) {
|
||||
desc, err := image.Config(ctx, provider, platform)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return RootFS(ctx, provider, desc)
|
||||
}
|
||||
|
||||
// Size returns the total size of an image's packed resources.
|
||||
func (image *Image) Size(ctx context.Context, provider content.Provider, platform string) (int64, error) {
|
||||
var size int64
|
||||
return size, Walk(ctx, Handlers(HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
|
||||
if desc.Size < 0 {
|
||||
return nil, errors.Errorf("invalid size %v in %v (%v)", desc.Size, desc.Digest, desc.MediaType)
|
||||
}
|
||||
size += desc.Size
|
||||
return nil, nil
|
||||
}), ChildrenHandler(provider, platform)), image.Target)
|
||||
}
|
||||
|
||||
// Manifest resolves a manifest from the image for the given platform.
|
||||
//
|
||||
// TODO(stevvooe): This violates the current platform agnostic approach to this
|
||||
// package by returning a specific manifest type. We'll need to refactor this
|
||||
// to return a manifest descriptor or decide that we want to bring the API in
|
||||
// this direction because this abstraction is not needed.`
|
||||
func Manifest(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform string) (ocispec.Manifest, error) {
|
||||
var (
|
||||
matcher platforms.Matcher
|
||||
m *ocispec.Manifest
|
||||
err error
|
||||
)
|
||||
if platform != "" {
|
||||
matcher, err = platforms.Parse(platform)
|
||||
if err != nil {
|
||||
return ocispec.Manifest{}, err
|
||||
}
|
||||
}
|
||||
|
||||
if err := Walk(ctx, HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
|
||||
switch desc.MediaType {
|
||||
case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
|
||||
p, err := content.ReadBlob(ctx, provider, desc.Digest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var manifest ocispec.Manifest
|
||||
if err := json.Unmarshal(p, &manifest); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if platform != "" {
|
||||
if desc.Platform != nil && !matcher.Match(*desc.Platform) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if desc.Platform == nil {
|
||||
p, err := content.ReadBlob(ctx, provider, manifest.Config.Digest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var image ocispec.Image
|
||||
if err := json.Unmarshal(p, &image); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !matcher.Match(platforms.Normalize(ocispec.Platform{OS: image.OS, Architecture: image.Architecture})) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
m = &manifest
|
||||
|
||||
return nil, nil
|
||||
case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
|
||||
p, err := content.ReadBlob(ctx, provider, desc.Digest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var idx ocispec.Index
|
||||
if err := json.Unmarshal(p, &idx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if platform == "" {
|
||||
return idx.Manifests, nil
|
||||
}
|
||||
|
||||
var descs []ocispec.Descriptor
|
||||
for _, d := range idx.Manifests {
|
||||
if d.Platform == nil || matcher.Match(*d.Platform) {
|
||||
descs = append(descs, d)
|
||||
}
|
||||
}
|
||||
|
||||
return descs, nil
|
||||
|
||||
}
|
||||
return nil, errors.Wrapf(errdefs.ErrNotFound, "unexpected media type %v for %v", desc.MediaType, desc.Digest)
|
||||
}), image); err != nil {
|
||||
return ocispec.Manifest{}, err
|
||||
}
|
||||
|
||||
if m == nil {
|
||||
return ocispec.Manifest{}, errors.Wrapf(errdefs.ErrNotFound, "manifest %v", image.Digest)
|
||||
}
|
||||
|
||||
return *m, nil
|
||||
}
|
||||
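A usage sketch for Manifest, assuming a content.Provider and an image target descriptor are already in hand; the printConfigDigest helper and the hard-coded platform string are illustrative.

package example

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// printConfigDigest resolves the platform-specific manifest for target and
// prints the digest of its config blob.
func printConfigDigest(ctx context.Context, provider content.Provider, target ocispec.Descriptor) error {
	m, err := images.Manifest(ctx, provider, target, "linux/amd64")
	if err != nil {
		return err
	}
	fmt.Println("config:", m.Config.Digest)
	return nil
}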
|
||||
// Config resolves the image configuration descriptor using a content provider
|
||||
// to resolve child resources on the image.
|
||||
//
|
||||
// The caller can then use the descriptor to resolve and process the
|
||||
// configuration of the image.
|
||||
func Config(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform string) (ocispec.Descriptor, error) {
|
||||
manifest, err := Manifest(ctx, provider, image, platform)
|
||||
if err != nil {
|
||||
return ocispec.Descriptor{}, err
|
||||
}
|
||||
return manifest.Config, nil
|
||||
}
|
||||
|
||||
// Platforms returns one or more platforms supported by the image.
|
||||
func Platforms(ctx context.Context, provider content.Provider, image ocispec.Descriptor) ([]ocispec.Platform, error) {
|
||||
var platformSpecs []ocispec.Platform
|
||||
return platformSpecs, Walk(ctx, Handlers(HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
|
||||
if desc.Platform != nil {
|
||||
platformSpecs = append(platformSpecs, *desc.Platform)
|
||||
return nil, ErrSkipDesc
|
||||
}
|
||||
|
||||
switch desc.MediaType {
|
||||
case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig:
|
||||
p, err := content.ReadBlob(ctx, provider, desc.Digest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var image ocispec.Image
|
||||
if err := json.Unmarshal(p, &image); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
platformSpecs = append(platformSpecs,
|
||||
platforms.Normalize(ocispec.Platform{OS: image.OS, Architecture: image.Architecture}))
|
||||
}
|
||||
return nil, nil
|
||||
}), ChildrenHandler(provider, "")), image)
|
||||
}
|
||||
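A sketch of checking multi-arch support with Platforms; supportsPlatform is a hypothetical helper and the platform string is supplied by the caller.

package example

import (
	"context"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/platforms"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// supportsPlatform reports whether the image referenced by target carries
// content matching the given platform string (e.g. "linux/arm64").
func supportsPlatform(ctx context.Context, provider content.Provider, target ocispec.Descriptor, platform string) (bool, error) {
	matcher, err := platforms.Parse(platform)
	if err != nil {
		return false, err
	}
	specs, err := images.Platforms(ctx, provider, target)
	if err != nil {
		return false, err
	}
	for _, p := range specs {
		if matcher.Match(p) {
			return true, nil
		}
	}
	return false, nil
}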
|
||||
// Check returns nil if all components of an image are available in the
|
||||
// provider for the specified platform.
|
||||
//
|
||||
// If available is true, the caller can assume that required represents the
|
||||
// complete set of content required for the image.
|
||||
//
|
||||
// missing will have the components that are part of required but not available
|
||||
// in the provider.
|
||||
//
|
||||
// If there is a problem resolving content, an error will be returned.
|
||||
func Check(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform string) (available bool, required, present, missing []ocispec.Descriptor, err error) {
|
||||
mfst, err := Manifest(ctx, provider, image, platform)
|
||||
if err != nil {
|
||||
if errdefs.IsNotFound(err) {
|
||||
return false, []ocispec.Descriptor{image}, nil, []ocispec.Descriptor{image}, nil
|
||||
}
|
||||
|
||||
return false, nil, nil, nil, errors.Wrapf(err, "failed to check image %v", image.Digest)
|
||||
}
|
||||
|
||||
// TODO(stevvooe): It is possible that referenced components could have
|
||||
// children, but this is rare. For now, we ignore this and only verify
|
||||
// that manifest components are present.
|
||||
required = append([]ocispec.Descriptor{mfst.Config}, mfst.Layers...)
|
||||
|
||||
for _, desc := range required {
|
||||
ra, err := provider.ReaderAt(ctx, desc.Digest)
|
||||
if err != nil {
|
||||
if errdefs.IsNotFound(err) {
|
||||
missing = append(missing, desc)
|
||||
continue
|
||||
} else {
|
||||
return false, nil, nil, nil, errors.Wrapf(err, "failed to check image %v", desc.Digest)
|
||||
}
|
||||
}
|
||||
ra.Close()
|
||||
present = append(present, desc)
|
||||
|
||||
}
|
||||
|
||||
return true, required, present, missing, nil
|
||||
}
|
||||
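A sketch of how Check's return values might be used to decide whether a pull can be skipped; reportMissing and the platform string are assumptions for illustration.

package example

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// reportMissing prints how much of an image is already present locally.
func reportMissing(ctx context.Context, provider content.Provider, target ocispec.Descriptor) error {
	available, required, present, missing, err := images.Check(ctx, provider, target, "linux/amd64")
	if err != nil {
		return err
	}
	fmt.Printf("available=%v, %d/%d blobs present, %d missing\n",
		available, len(present), len(required), len(missing))
	return nil
}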
|
||||
// Children returns the immediate children of content described by the descriptor.
|
||||
func Children(ctx context.Context, provider content.Provider, desc ocispec.Descriptor, platform string) ([]ocispec.Descriptor, error) {
|
||||
var descs []ocispec.Descriptor
|
||||
switch desc.MediaType {
|
||||
case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
|
||||
p, err := content.ReadBlob(ctx, provider, desc.Digest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// TODO(stevvooe): We just assume oci manifest, for now. There may be
|
||||
// subtle differences from the docker version.
|
||||
var manifest ocispec.Manifest
|
||||
if err := json.Unmarshal(p, &manifest); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
descs = append(descs, manifest.Config)
|
||||
descs = append(descs, manifest.Layers...)
|
||||
case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
|
||||
p, err := content.ReadBlob(ctx, provider, desc.Digest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var index ocispec.Index
|
||||
if err := json.Unmarshal(p, &index); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if platform != "" {
|
||||
matcher, err := platforms.Parse(platform)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, d := range index.Manifests {
|
||||
if d.Platform == nil || matcher.Match(*d.Platform) {
|
||||
descs = append(descs, d)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
descs = append(descs, index.Manifests...)
|
||||
}
|
||||
|
||||
case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerGzip,
|
||||
MediaTypeDockerSchema2LayerForeign, MediaTypeDockerSchema2LayerForeignGzip,
|
||||
MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig,
|
||||
ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerGzip,
|
||||
ocispec.MediaTypeImageLayerNonDistributable, ocispec.MediaTypeImageLayerNonDistributableGzip,
|
||||
MediaTypeContainerd1Checkpoint, MediaTypeContainerd1CheckpointConfig:
|
||||
// childless data types.
|
||||
return nil, nil
|
||||
default:
|
||||
log.G(ctx).Warnf("encountered unknown type %v; children may not be fetched", desc.MediaType)
|
||||
}
|
||||
|
||||
return descs, nil
|
||||
}
|
||||
|
||||
// RootFS returns the unpacked diffids that make up an image's rootfs.
|
||||
//
|
||||
// These are used to verify that a set of layers was unpacked to the expected
|
||||
// values.
|
||||
func RootFS(ctx context.Context, provider content.Provider, configDesc ocispec.Descriptor) ([]digest.Digest, error) {
|
||||
p, err := content.ReadBlob(ctx, provider, configDesc.Digest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var config ocispec.Image
|
||||
if err := json.Unmarshal(p, &config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// TODO(stevvooe): Remove this bit when OCI structure uses correct type for
|
||||
// rootfs.DiffIDs.
|
||||
var diffIDs []digest.Digest
|
||||
for _, diffID := range config.RootFS.DiffIDs {
|
||||
diffIDs = append(diffIDs, digest.Digest(diffID))
|
||||
}
|
||||
|
||||
return diffIDs, nil
|
||||
}
|
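Tying Config and RootFS together, a sketch that prints the uncompressed layer digests for a platform; printDiffIDs and the platform string are illustrative assumptions.

package example

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// printDiffIDs resolves the image config and lists its rootfs diff IDs,
// the values a snapshotter is expected to produce when unpacking layers.
func printDiffIDs(ctx context.Context, provider content.Provider, target ocispec.Descriptor) error {
	cfg, err := images.Config(ctx, provider, target, "linux/amd64")
	if err != nil {
		return err
	}
	diffIDs, err := images.RootFS(ctx, provider, cfg)
	if err != nil {
		return err
	}
	for i, d := range diffIDs {
		fmt.Printf("layer %d: %s\n", i, d)
	}
	return nil
}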
21
vendor/github.com/containerd/containerd/images/importexport.go
generated
vendored
Normal file
|
@ -0,0 +1,21 @@
|
|||
package images
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
|
||||
"github.com/containerd/containerd/content"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// Importer is the interface for an image importer.
|
||||
type Importer interface {
|
||||
// Import imports an image from a tar stream.
|
||||
Import(ctx context.Context, store content.Store, reader io.Reader) ([]Image, error)
|
||||
}
|
||||
|
||||
// Exporter is the interface for an image exporter.
|
||||
type Exporter interface {
|
||||
// Export exports an image to a tar stream.
|
||||
Export(ctx context.Context, store content.Store, desc ocispec.Descriptor, writer io.Writer) error
|
||||
}
|
23
vendor/github.com/containerd/containerd/images/mediatypes.go
generated
vendored
Normal file
|
@ -0,0 +1,23 @@
|
|||
package images
|
||||
|
||||
// mediatype definitions for image components handled in containerd.
|
||||
//
|
||||
// oci components are generally referenced directly, although we may centralize
|
||||
// here for clarity.
|
||||
const (
|
||||
MediaTypeDockerSchema2Layer = "application/vnd.docker.image.rootfs.diff.tar"
|
||||
MediaTypeDockerSchema2LayerForeign = "application/vnd.docker.image.rootfs.foreign.diff.tar"
|
||||
MediaTypeDockerSchema2LayerGzip = "application/vnd.docker.image.rootfs.diff.tar.gzip"
|
||||
MediaTypeDockerSchema2LayerForeignGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
|
||||
MediaTypeDockerSchema2Config = "application/vnd.docker.container.image.v1+json"
|
||||
MediaTypeDockerSchema2Manifest = "application/vnd.docker.distribution.manifest.v2+json"
|
||||
MediaTypeDockerSchema2ManifestList = "application/vnd.docker.distribution.manifest.list.v2+json"
|
||||
// Checkpoint/Restore Media Types
|
||||
MediaTypeContainerd1Checkpoint = "application/vnd.containerd.container.criu.checkpoint.criu.tar"
|
||||
MediaTypeContainerd1CheckpointPreDump = "application/vnd.containerd.container.criu.checkpoint.predump.tar"
|
||||
MediaTypeContainerd1Resource = "application/vnd.containerd.container.resource.tar"
|
||||
MediaTypeContainerd1RW = "application/vnd.containerd.container.rw.tar"
|
||||
MediaTypeContainerd1CheckpointConfig = "application/vnd.containerd.container.checkpoint.config.v1+proto"
|
||||
// Legacy Docker schema1 manifest
|
||||
MediaTypeDockerSchema1Manifest = "application/vnd.docker.distribution.manifest.v1+prettyjws"
|
||||
)
|
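A small sketch of the kind of dispatch these constants enable, mirroring the switches in Children and Manifest above; isIndex is a hypothetical helper.

package example

import (
	"github.com/containerd/containerd/images"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// isIndex reports whether a media type names a multi-platform index
// (Docker manifest list or OCI image index).
func isIndex(mediaType string) bool {
	switch mediaType {
	case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
		return true
	}
	return false
}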
47
vendor/github.com/containerd/containerd/import_test.go
generated
vendored
Normal file
|
@ -0,0 +1,47 @@
|
|||
package containerd
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/containerd/containerd/images/oci"
|
||||
)
|
||||
|
||||
// TestOCIExportAndImport exports testImage as a tar stream,
|
||||
// and imports the tar stream as a new image.
|
||||
func TestOCIExportAndImport(t *testing.T) {
|
||||
// TODO: support windows
|
||||
if testing.Short() || runtime.GOOS == "windows" {
|
||||
t.Skip()
|
||||
}
|
||||
ctx, cancel := testContext()
|
||||
defer cancel()
|
||||
|
||||
client, err := New(address)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
pulled, err := client.Pull(ctx, testImage)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
exported, err := client.Export(ctx, &oci.V1Exporter{}, pulled.Target())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
imgrecs, err := client.Import(ctx, &oci.V1Importer{ImageName: "foo/bar:"}, exported)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for _, imgrec := range imgrecs {
|
||||
err = client.ImageService().Delete(ctx, imgrec.Name())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
92
vendor/github.com/containerd/containerd/lease.go
generated
vendored
Normal file
|
@ -0,0 +1,92 @@
|
|||
package containerd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
leasesapi "github.com/containerd/containerd/api/services/leases/v1"
|
||||
"github.com/containerd/containerd/leases"
|
||||
)
|
||||
|
||||
// Lease is used to hold a reference to active resources which have not been
|
||||
// referenced by a root resource. This is useful for preventing garbage
|
||||
// collection of resources while they are actively being updated.
|
||||
type Lease struct {
|
||||
id string
|
||||
createdAt time.Time
|
||||
|
||||
client *Client
|
||||
}
|
||||
|
||||
// CreateLease creates a new lease
|
||||
func (c *Client) CreateLease(ctx context.Context) (Lease, error) {
|
||||
lapi := leasesapi.NewLeasesClient(c.conn)
|
||||
resp, err := lapi.Create(ctx, &leasesapi.CreateRequest{})
|
||||
if err != nil {
|
||||
return Lease{}, err
|
||||
}
|
||||
|
||||
return Lease{
|
||||
id: resp.Lease.ID,
|
||||
client: c,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ListLeases lists active leases
|
||||
func (c *Client) ListLeases(ctx context.Context) ([]Lease, error) {
|
||||
lapi := leasesapi.NewLeasesClient(c.conn)
|
||||
resp, err := lapi.List(ctx, &leasesapi.ListRequest{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
leases := make([]Lease, len(resp.Leases))
|
||||
for i := range resp.Leases {
|
||||
leases[i] = Lease{
|
||||
id: resp.Leases[i].ID,
|
||||
createdAt: resp.Leases[i].CreatedAt,
|
||||
client: c,
|
||||
}
|
||||
}
|
||||
|
||||
return leases, nil
|
||||
}
|
||||
|
||||
// WithLease attaches a lease to the context
|
||||
func (c *Client) WithLease(ctx context.Context) (context.Context, func() error, error) {
|
||||
_, ok := leases.Lease(ctx)
|
||||
if ok {
|
||||
return ctx, func() error {
|
||||
return nil
|
||||
}, nil
|
||||
}
|
||||
|
||||
l, err := c.CreateLease(ctx)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
ctx = leases.WithLease(ctx, l.ID())
|
||||
return ctx, func() error {
|
||||
return l.Delete(ctx)
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ID returns the lease ID
|
||||
func (l Lease) ID() string {
|
||||
return l.id
|
||||
}
|
||||
|
||||
// CreatedAt returns the time at which the lease was created
|
||||
func (l Lease) CreatedAt() time.Time {
|
||||
return l.createdAt
|
||||
}
|
||||
|
||||
// Delete deletes the lease, removing the reference to all resources created
|
||||
// during the lease.
|
||||
func (l Lease) Delete(ctx context.Context) error {
|
||||
lapi := leasesapi.NewLeasesClient(l.client.conn)
|
||||
_, err := lapi.Delete(ctx, &leasesapi.DeleteRequest{
|
||||
ID: l.id,
|
||||
})
|
||||
return err
|
||||
}
|
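A sketch of the intended pattern for WithLease: wrap a multi-step operation so intermediate content is not garbage collected; withLease is a hypothetical helper.

package example

import (
	"context"

	"github.com/containerd/containerd"
)

// withLease runs fn under a lease; resources created by fn are protected
// from garbage collection until done releases the lease.
func withLease(ctx context.Context, client *containerd.Client, fn func(context.Context) error) error {
	ctx, done, err := client.WithLease(ctx)
	if err != nil {
		return err
	}
	defer done() // releases the lease (a no-op if one was already attached)

	return fn(ctx)
}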
81
vendor/github.com/containerd/containerd/log/context.go
generated
vendored
Normal file
|
@ -0,0 +1,81 @@
|
|||
package log
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
// G is an alias for GetLogger.
|
||||
//
|
||||
// We may want to define this locally to a package to get package tagged log
|
||||
// messages.
|
||||
G = GetLogger
|
||||
|
||||
// L is an alias for the standard logger.
|
||||
L = logrus.NewEntry(logrus.StandardLogger())
|
||||
)
|
||||
|
||||
type (
|
||||
loggerKey struct{}
|
||||
moduleKey struct{}
|
||||
)
|
||||
|
||||
// WithLogger returns a new context with the provided logger. Use in
|
||||
// combination with logger.WithField(s) for great effect.
|
||||
func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context {
|
||||
return context.WithValue(ctx, loggerKey{}, logger)
|
||||
}
|
||||
|
||||
// GetLogger retrieves the current logger from the context. If no logger is
|
||||
// available, the default logger is returned.
|
||||
func GetLogger(ctx context.Context) *logrus.Entry {
|
||||
logger := ctx.Value(loggerKey{})
|
||||
|
||||
if logger == nil {
|
||||
return L
|
||||
}
|
||||
|
||||
return logger.(*logrus.Entry)
|
||||
}
|
||||
|
||||
// WithModule adds the module to the context, appending it with a slash if a
|
||||
// module already exists. A module is a roughly correlated grouping defined by the
|
||||
// call tree for a given context.
|
||||
//
|
||||
// As an example, we might have a "node" module already part of a context. If
|
||||
// this function is called with "tls", the new value of module will be
|
||||
// "node/tls".
|
||||
//
|
||||
// Modules represent the call path. If the new module and last module are the
|
||||
// same, a new module entry will not be created. If the new module and an
|
||||
// older module are the same but separated by other modules, the cycle will be
|
||||
// represented by the module path.
|
||||
func WithModule(ctx context.Context, module string) context.Context {
|
||||
parent := GetModulePath(ctx)
|
||||
|
||||
if parent != "" {
|
||||
// don't re-append module when module is the same.
|
||||
if path.Base(parent) == module {
|
||||
return ctx
|
||||
}
|
||||
|
||||
module = path.Join(parent, module)
|
||||
}
|
||||
|
||||
ctx = WithLogger(ctx, GetLogger(ctx).WithField("module", module))
|
||||
return context.WithValue(ctx, moduleKey{}, module)
|
||||
}
|
||||
|
||||
// GetModulePath returns the module path for the provided context. If no module
|
||||
// is set, an empty string is returned.
|
||||
func GetModulePath(ctx context.Context) string {
|
||||
module := ctx.Value(moduleKey{})
|
||||
if module == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
return module.(string)
|
||||
}
|
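A sketch of the intended layering of WithLogger and WithModule; annotate and work are hypothetical call sites, and the field names are arbitrary.

package example

import (
	"context"

	"github.com/containerd/containerd/log"
)

// annotate tags the context once; downstream code then retrieves an
// already-enriched logger via log.G without further plumbing.
func annotate(ctx context.Context) context.Context {
	ctx = log.WithLogger(ctx, log.G(ctx).WithField("task", "example"))
	return log.WithModule(ctx, "demo") // entries now carry module=demo
}

func work(ctx context.Context) {
	log.G(ctx).WithField("step", 1).Info("doing work")
}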
41
vendor/github.com/containerd/containerd/log/context_test.go
generated
vendored
Normal file
|
@ -0,0 +1,41 @@
|
|||
package log
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestLoggerContext(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
assert.Equal(t, GetLogger(ctx), L) // should be same as L variable
|
||||
assert.Equal(t, G(ctx), GetLogger(ctx)) // these should be the same.
|
||||
|
||||
ctx = WithLogger(ctx, G(ctx).WithField("test", "one"))
|
||||
assert.Equal(t, GetLogger(ctx).Data["test"], "one")
|
||||
assert.Equal(t, G(ctx), GetLogger(ctx)) // these should be the same.
|
||||
}
|
||||
|
||||
func TestModuleContext(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
assert.Equal(t, GetModulePath(ctx), "")
|
||||
|
||||
ctx = WithModule(ctx, "a") // basic behavior
|
||||
assert.Equal(t, GetModulePath(ctx), "a")
|
||||
logger := GetLogger(ctx)
|
||||
assert.Equal(t, logger.Data["module"], "a")
|
||||
|
||||
parent, ctx := ctx, WithModule(ctx, "a")
|
||||
assert.Equal(t, ctx, parent) // should be a no-op
|
||||
assert.Equal(t, GetModulePath(ctx), "a")
|
||||
assert.Equal(t, GetLogger(ctx).Data["module"], "a")
|
||||
|
||||
ctx = WithModule(ctx, "b") // new module
|
||||
assert.Equal(t, GetModulePath(ctx), "a/b")
|
||||
assert.Equal(t, GetLogger(ctx).Data["module"], "a/b")
|
||||
|
||||
ctx = WithModule(ctx, "c") // new module
|
||||
assert.Equal(t, GetModulePath(ctx), "a/b/c")
|
||||
assert.Equal(t, GetLogger(ctx).Data["module"], "a/b/c")
|
||||
}
|
12
vendor/github.com/containerd/containerd/log/grpc.go
generated
vendored
Normal file
|
@ -0,0 +1,12 @@
|
|||
package log
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"log"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
func init() {
|
||||
grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags))
|
||||
}
|
46
vendor/github.com/containerd/containerd/mount/lookup_unix.go
generated
vendored
Normal file
|
@ -0,0 +1,46 @@
|
|||
// +build !windows
|
||||
|
||||
package mount
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Lookup returns the mount info that corresponds to the path.
|
||||
func Lookup(dir string) (Info, error) {
|
||||
var dirStat syscall.Stat_t
|
||||
dir = filepath.Clean(dir)
|
||||
if err := syscall.Stat(dir, &dirStat); err != nil {
|
||||
return Info{}, errors.Wrapf(err, "failed to access %q", dir)
|
||||
}
|
||||
|
||||
mounts, err := Self()
|
||||
if err != nil {
|
||||
return Info{}, err
|
||||
}
|
||||
|
||||
// Sort descending order by Info.Mountpoint
|
||||
sort.Slice(mounts, func(i, j int) bool {
|
||||
return mounts[j].Mountpoint < mounts[i].Mountpoint
|
||||
})
|
||||
for _, m := range mounts {
|
||||
// Note that m.{Major, Minor} are generally unreliable for our purpose here
|
||||
// https://www.spinics.net/lists/linux-btrfs/msg58908.html
|
||||
var st syscall.Stat_t
|
||||
if err := syscall.Stat(m.Mountpoint, &st); err != nil {
|
||||
// may fail; ignore err
|
||||
continue
|
||||
}
|
||||
if st.Dev == dirStat.Dev && strings.HasPrefix(dir, m.Mountpoint) {
|
||||
return m, nil
|
||||
}
|
||||
}
|
||||
|
||||
return Info{}, fmt.Errorf("failed to find the mount info for %q", dir)
|
||||
}
|
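A usage sketch for Lookup; describeMount is a hypothetical helper.

package example

import (
	"fmt"

	"github.com/containerd/containerd/mount"
)

// describeMount prints which filesystem backs dir, using the longest
// matching mountpoint found by Lookup.
func describeMount(dir string) error {
	info, err := mount.Lookup(dir)
	if err != nil {
		return err
	}
	fmt.Printf("%s is on %s (%s), mounted at %s\n", dir, info.Source, info.FSType, info.Mountpoint)
	return nil
}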
13
vendor/github.com/containerd/containerd/mount/lookup_unsupported.go
generated
vendored
Normal file
|
@ -0,0 +1,13 @@
|
|||
// +build windows
|
||||
|
||||
package mount
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// Lookup returns the mount info that corresponds to the path.
|
||||
func Lookup(dir string) (Info, error) {
|
||||
return Info{}, fmt.Errorf("mount.Lookup is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
|
||||
}
|
24
vendor/github.com/containerd/containerd/mount/mount.go
generated
vendored
Normal file
|
@ -0,0 +1,24 @@
|
|||
package mount
|
||||
|
||||
// Mount is the lingua franca of containerd. A mount represents a
|
||||
// serialized mount syscall. Components either emit or consume mounts.
|
||||
type Mount struct {
|
||||
// Type specifies the host-specific type of the mount.
|
||||
Type string
|
||||
// Source specifies where to mount from. Depending on the host system, this
|
||||
// can be a source path or device.
|
||||
Source string
|
||||
// Options contains zero or more fstab-style mount options. Typically,
|
||||
// these are platform specific.
|
||||
Options []string
|
||||
}
|
||||
|
||||
// All mounts all the provided mounts to the provided target
|
||||
func All(mounts []Mount, target string) error {
|
||||
for _, m := range mounts {
|
||||
if err := m.Mount(target); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
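A sketch constructing a Mount value and applying it with All; bindReadOnly and its option list are assumptions, using fstab-style options that the Linux implementation understands.

package example

import "github.com/containerd/containerd/mount"

// bindReadOnly bind-mounts src onto target read-only. On Linux, "rbind"
// maps to MS_BIND|MS_REC and "ro" to MS_RDONLY (applied via a remount).
func bindReadOnly(src, target string) error {
	m := mount.Mount{
		Type:    "bind",
		Source:  src,
		Options: []string{"rbind", "ro"},
	}
	return mount.All([]mount.Mount{m}, target)
}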
141
vendor/github.com/containerd/containerd/mount/mount_linux.go
generated
vendored
Normal file
|
@ -0,0 +1,141 @@
|
|||
package mount
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Mount to the provided target path
|
||||
func (m *Mount) Mount(target string) error {
|
||||
flags, data := parseMountOptions(m.Options)
|
||||
|
||||
// propagation types.
|
||||
const ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE
|
||||
|
||||
// Ensure propagation type change flags aren't included in other calls.
|
||||
oflags := flags &^ ptypes
|
||||
|
||||
// In the case of remounting with changed data (data != ""), need to call mount (moby/moby#34077).
|
||||
if flags&unix.MS_REMOUNT == 0 || data != "" {
|
||||
// Initial call applying all non-propagation flags for mount
|
||||
// or remount with changed data
|
||||
if err := unix.Mount(m.Source, target, m.Type, uintptr(oflags), data); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if flags&ptypes != 0 {
|
||||
// Change the propagation type.
|
||||
const pflags = ptypes | unix.MS_REC | unix.MS_SILENT
|
||||
if err := unix.Mount("", target, "", uintptr(flags&pflags), ""); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
const broflags = unix.MS_BIND | unix.MS_RDONLY
|
||||
if oflags&broflags == broflags {
|
||||
// Remount the bind to apply read only.
|
||||
return unix.Mount("", target, "", uintptr(oflags|unix.MS_REMOUNT), "")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Unmount the provided mount path with the flags
|
||||
func Unmount(target string, flags int) error {
|
||||
if err := unmount(target, flags); err != nil && err != unix.EINVAL {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func unmount(target string, flags int) error {
|
||||
for i := 0; i < 50; i++ {
|
||||
if err := unix.Unmount(target, flags); err != nil {
|
||||
switch err {
|
||||
case unix.EBUSY:
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
continue
|
||||
default:
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return errors.Wrapf(unix.EBUSY, "failed to unmount target %s", target)
|
||||
}
|
||||
|
||||
// UnmountAll repeatedly unmounts the given mount point until there
|
||||
// are no mounts remaining (EINVAL is returned by the unmount syscall), which is
|
||||
// useful for undoing a stack of mounts on the same mount point.
|
||||
func UnmountAll(mount string, flags int) error {
|
||||
for {
|
||||
if err := unmount(mount, flags); err != nil {
|
||||
// EINVAL is returned if the target is not a
|
||||
// mount point, indicating that we are
|
||||
// done. It can also indicate a few other
|
||||
// things (such as invalid flags) which we
|
||||
// unfortunately end up squelching here too.
|
||||
if err == unix.EINVAL {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// parseMountOptions takes fstab style mount options and parses them for
|
||||
// use with a standard mount() syscall
|
||||
func parseMountOptions(options []string) (int, string) {
|
||||
var (
|
||||
flag int
|
||||
data []string
|
||||
)
|
||||
flags := map[string]struct {
|
||||
clear bool
|
||||
flag int
|
||||
}{
|
||||
"async": {true, unix.MS_SYNCHRONOUS},
|
||||
"atime": {true, unix.MS_NOATIME},
|
||||
"bind": {false, unix.MS_BIND},
|
||||
"defaults": {false, 0},
|
||||
"dev": {true, unix.MS_NODEV},
|
||||
"diratime": {true, unix.MS_NODIRATIME},
|
||||
"dirsync": {false, unix.MS_DIRSYNC},
|
||||
"exec": {true, unix.MS_NOEXEC},
|
||||
"mand": {false, unix.MS_MANDLOCK},
|
||||
"noatime": {false, unix.MS_NOATIME},
|
||||
"nodev": {false, unix.MS_NODEV},
|
||||
"nodiratime": {false, unix.MS_NODIRATIME},
|
||||
"noexec": {false, unix.MS_NOEXEC},
|
||||
"nomand": {true, unix.MS_MANDLOCK},
|
||||
"norelatime": {true, unix.MS_RELATIME},
|
||||
"nostrictatime": {true, unix.MS_STRICTATIME},
|
||||
"nosuid": {false, unix.MS_NOSUID},
|
||||
"rbind": {false, unix.MS_BIND | unix.MS_REC},
|
||||
"relatime": {false, unix.MS_RELATIME},
|
||||
"remount": {false, unix.MS_REMOUNT},
|
||||
"ro": {false, unix.MS_RDONLY},
|
||||
"rw": {true, unix.MS_RDONLY},
|
||||
"strictatime": {false, unix.MS_STRICTATIME},
|
||||
"suid": {true, unix.MS_NOSUID},
|
||||
"sync": {false, unix.MS_SYNCHRONOUS},
|
||||
}
|
||||
for _, o := range options {
|
||||
// If the option does not exist in the flags table or the flag
|
||||
// is not supported on the platform,
|
||||
// then it is a data value for a specific fs type
|
||||
if f, exists := flags[o]; exists && f.flag != 0 {
|
||||
if f.clear {
|
||||
flag &^= f.flag
|
||||
} else {
|
||||
flag |= f.flag
|
||||
}
|
||||
} else {
|
||||
data = append(data, o)
|
||||
}
|
||||
}
|
||||
return flag, strings.Join(data, ",")
|
||||
}
|
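parseMountOptions is unexported, but the flag arithmetic it performs can be shown directly; a sketch of what the option list ["rbind", "ro", "rw"] would accumulate to under the table above.

package example

import "golang.org/x/sys/unix"

// exampleFlags mirrors the table above: "rbind" sets MS_BIND|MS_REC, "ro"
// sets MS_RDONLY, and a later "rw" clears MS_RDONLY again (clear=true).
func exampleFlags() uintptr {
	flags := unix.MS_BIND | unix.MS_REC // "rbind"
	flags |= unix.MS_RDONLY             // "ro"
	flags &^= unix.MS_RDONLY            // "rw"
	return uintptr(flags)
}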
25
vendor/github.com/containerd/containerd/mount/mount_unix.go
generated
vendored
Normal file
|
@ -0,0 +1,25 @@
|
|||
// +build darwin freebsd
|
||||
|
||||
package mount
|
||||
|
||||
import "github.com/pkg/errors"
|
||||
|
||||
var (
|
||||
// ErrNotImplementOnUnix is returned for methods that are not implemented
|
||||
ErrNotImplementOnUnix = errors.New("not implemented under unix")
|
||||
)
|
||||
|
||||
// Mount is not implemented on this platform
|
||||
func (m *Mount) Mount(target string) error {
|
||||
return ErrNotImplementOnUnix
|
||||
}
|
||||
|
||||
// Unmount is not implemented on this platform
|
||||
func Unmount(mount string, flags int) error {
|
||||
return ErrNotImplementOnUnix
|
||||
}
|
||||
|
||||
// UnmountAll is not implemented on this platform
|
||||
func UnmountAll(mount string, flags int) error {
|
||||
return ErrNotImplementOnUnix
|
||||
}
|
23
vendor/github.com/containerd/containerd/mount/mount_windows.go
generated
vendored
Normal file
|
@ -0,0 +1,23 @@
|
|||
package mount
|
||||
|
||||
import "github.com/pkg/errors"
|
||||
|
||||
var (
|
||||
// ErrNotImplementOnWindows is returned when an action is not implemented for windows
|
||||
ErrNotImplementOnWindows = errors.New("not implemented under windows")
|
||||
)
|
||||
|
||||
// Mount to the provided target
|
||||
func (m *Mount) Mount(target string) error {
|
||||
return ErrNotImplementOnWindows
|
||||
}
|
||||
|
||||
// Unmount the mount at the provided path
|
||||
func Unmount(mount string, flags int) error {
|
||||
return ErrNotImplementOnWindows
|
||||
}
|
||||
|
||||
// UnmountAll unmounts all mounts at the provided path
|
||||
func UnmountAll(mount string, flags int) error {
|
||||
return ErrNotImplementOnWindows
|
||||
}
|
40
vendor/github.com/containerd/containerd/mount/mountinfo.go
generated
vendored
Normal file
|
@ -0,0 +1,40 @@
|
|||
package mount
|
||||
|
||||
// Info reveals information about a particular mounted filesystem. This
|
||||
// struct is populated from the content in the /proc/<pid>/mountinfo file.
|
||||
type Info struct {
|
||||
// ID is a unique identifier of the mount (may be reused after umount).
|
||||
ID int
|
||||
|
||||
// Parent indicates the ID of the mount parent (or of self for the top of the
|
||||
// mount tree).
|
||||
Parent int
|
||||
|
||||
// Major indicates one half of the device ID which identifies the device class.
|
||||
Major int
|
||||
|
||||
// Minor indicates one half of the device ID which identifies a specific
|
||||
// instance of device.
|
||||
Minor int
|
||||
|
||||
// Root of the mount within the filesystem.
|
||||
Root string
|
||||
|
||||
// Mountpoint indicates the mount point relative to the process's root.
|
||||
Mountpoint string
|
||||
|
||||
// Options represents mount-specific options.
|
||||
Options string
|
||||
|
||||
// Optional represents optional fields.
|
||||
Optional string
|
||||
|
||||
// FSType indicates the type of filesystem, such as EXT3.
|
||||
FSType string
|
||||
|
||||
// Source indicates filesystem specific information or "none".
|
||||
Source string
|
||||
|
||||
// VFSOptions represents per super block options.
|
||||
VFSOptions string
|
||||
}
|
45
vendor/github.com/containerd/containerd/mount/mountinfo_freebsd.go
generated
vendored
Normal file
|
@ -0,0 +1,45 @@
|
|||
package mount
|
||||
|
||||
/*
|
||||
#include <sys/param.h>
|
||||
#include <sys/ucred.h>
|
||||
#include <sys/mount.h>
|
||||
*/
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Self retrieves a list of mounts for the current running process.
|
||||
func Self() ([]Info, error) {
|
||||
var rawEntries *C.struct_statfs
|
||||
|
||||
count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT))
|
||||
if count == 0 {
|
||||
return nil, fmt.Errorf("Failed to call getmntinfo")
|
||||
}
|
||||
|
||||
var entries []C.struct_statfs
|
||||
header := (*reflect.SliceHeader)(unsafe.Pointer(&entries))
|
||||
header.Cap = count
|
||||
header.Len = count
|
||||
header.Data = uintptr(unsafe.Pointer(rawEntries))
|
||||
|
||||
var out []Info
|
||||
for _, entry := range entries {
|
||||
var mountinfo Info
|
||||
mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0])
|
||||
mountinfo.Source = C.GoString(&entry.f_mntfromname[0])
|
||||
mountinfo.FSType = C.GoString(&entry.f_fstypename[0])
|
||||
out = append(out, mountinfo)
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// PID collects the mounts for a specific process ID.
|
||||
func PID(pid int) ([]Info, error) {
|
||||
return nil, fmt.Errorf("mountinfo.PID is not implemented on freebsd")
|
||||
}
|
94
vendor/github.com/containerd/containerd/mount/mountinfo_linux.go
generated
vendored
Normal file
|
@ -0,0 +1,94 @@
|
|||
// +build linux
|
||||
|
||||
package mount
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
/* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
|
||||
(1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11)
|
||||
|
||||
(1) mount ID: unique identifier of the mount (may be reused after umount)
|
||||
(2) parent ID: ID of parent (or of self for the top of the mount tree)
|
||||
(3) major:minor: value of st_dev for files on filesystem
|
||||
(4) root: root of the mount within the filesystem
|
||||
(5) mount point: mount point relative to the process's root
|
||||
(6) mount options: per mount options
|
||||
(7) optional fields: zero or more fields of the form "tag[:value]"
|
||||
(8) separator: marks the end of the optional fields
|
||||
(9) filesystem type: name of filesystem of the form "type[.subtype]"
|
||||
(10) mount source: filesystem specific information or "none"
|
||||
(11) super options: per super block options*/
|
||||
mountinfoFormat = "%d %d %d:%d %s %s %s %s"
|
||||
)
|
||||
|
||||
// Self retrieves a list of mounts for the current running process.
|
||||
func Self() ([]Info, error) {
|
||||
f, err := os.Open("/proc/self/mountinfo")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
return parseInfoFile(f)
|
||||
}
|
||||
|
||||
func parseInfoFile(r io.Reader) ([]Info, error) {
|
||||
var (
|
||||
s = bufio.NewScanner(r)
|
||||
out = []Info{}
|
||||
)
|
||||
|
||||
for s.Scan() {
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var (
|
||||
p = Info{}
|
||||
text = s.Text()
|
||||
optionalFields string
|
||||
)
|
||||
|
||||
if _, err := fmt.Sscanf(text, mountinfoFormat,
|
||||
&p.ID, &p.Parent, &p.Major, &p.Minor,
|
||||
&p.Root, &p.Mountpoint, &p.Options, &optionalFields); err != nil {
|
||||
return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err)
|
||||
}
|
||||
// Safe as mountinfo encodes mountpoints with spaces as \040.
|
||||
index := strings.Index(text, " - ")
|
||||
postSeparatorFields := strings.Fields(text[index+3:])
|
||||
if len(postSeparatorFields) < 3 {
|
||||
return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text)
|
||||
}
|
||||
|
||||
if optionalFields != "-" {
|
||||
p.Optional = optionalFields
|
||||
}
|
||||
|
||||
p.FSType = postSeparatorFields[0]
|
||||
p.Source = postSeparatorFields[1]
|
||||
p.VFSOptions = strings.Join(postSeparatorFields[2:], " ")
|
||||
out = append(out, p)
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// PID collects the mounts for a specific process ID. If the process
|
||||
// ID is unknown, it is better to use `Self` which will inspect
|
||||
// "/proc/self/mountinfo" instead.
|
||||
func PID(pid int) ([]Info, error) {
|
||||
f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
return parseInfoFile(f)
|
||||
}
|
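A usage sketch driving the /proc/self/mountinfo parser above via Self; listOverlay and the filesystem filter are illustrative.

package example

import (
	"fmt"

	"github.com/containerd/containerd/mount"
)

// listOverlay prints every overlay mount visible to the current process,
// as parsed from /proc/self/mountinfo.
func listOverlay() error {
	infos, err := mount.Self()
	if err != nil {
		return err
	}
	for _, m := range infos {
		if m.FSType == "overlay" {
			fmt.Println(m.Mountpoint, m.Source, m.VFSOptions)
		}
	}
	return nil
}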
476
vendor/github.com/containerd/containerd/mount/mountinfo_linux_test.go
generated
vendored
Normal file
|
@ -0,0 +1,476 @@
|
|||
// +build linux
|
||||
|
||||
package mount
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const (
|
||||
fedoraMountinfo = `15 35 0:3 / /proc rw,nosuid,nodev,noexec,relatime shared:5 - proc proc rw
|
||||
16 35 0:14 / /sys rw,nosuid,nodev,noexec,relatime shared:6 - sysfs sysfs rw,seclabel
|
||||
17 35 0:5 / /dev rw,nosuid shared:2 - devtmpfs devtmpfs rw,seclabel,size=8056484k,nr_inodes=2014121,mode=755
|
||||
18 16 0:15 / /sys/kernel/security rw,nosuid,nodev,noexec,relatime shared:7 - securityfs securityfs rw
|
||||
19 16 0:13 / /sys/fs/selinux rw,relatime shared:8 - selinuxfs selinuxfs rw
|
||||
20 17 0:16 / /dev/shm rw,nosuid,nodev shared:3 - tmpfs tmpfs rw,seclabel
|
||||
21 17 0:10 / /dev/pts rw,nosuid,noexec,relatime shared:4 - devpts devpts rw,seclabel,gid=5,mode=620,ptmxmode=000
|
||||
22 35 0:17 / /run rw,nosuid,nodev shared:21 - tmpfs tmpfs rw,seclabel,mode=755
|
||||
23 16 0:18 / /sys/fs/cgroup rw,nosuid,nodev,noexec shared:9 - tmpfs tmpfs rw,seclabel,mode=755
|
||||
24 23 0:19 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd
|
||||
25 16 0:20 / /sys/fs/pstore rw,nosuid,nodev,noexec,relatime shared:20 - pstore pstore rw
|
||||
26 23 0:21 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:11 - cgroup cgroup rw,cpuset,clone_children
|
||||
27 23 0:22 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:12 - cgroup cgroup rw,cpuacct,cpu,clone_children
|
||||
28 23 0:23 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:13 - cgroup cgroup rw,memory,clone_children
|
||||
29 23 0:24 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:14 - cgroup cgroup rw,devices,clone_children
|
||||
30 23 0:25 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,freezer,clone_children
|
||||
31 23 0:26 / /sys/fs/cgroup/net_cls rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,net_cls,clone_children
|
||||
32 23 0:27 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,blkio,clone_children
|
||||
33 23 0:28 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,perf_event,clone_children
|
||||
34 23 0:29 / /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,hugetlb,clone_children
|
||||
35 1 253:2 / / rw,relatime shared:1 - ext4 /dev/mapper/ssd-root--f20 rw,seclabel,data=ordered
|
||||
36 15 0:30 / /proc/sys/fs/binfmt_misc rw,relatime shared:22 - autofs systemd-1 rw,fd=38,pgrp=1,timeout=300,minproto=5,maxproto=5,direct
|
||||
37 17 0:12 / /dev/mqueue rw,relatime shared:23 - mqueue mqueue rw,seclabel
|
||||
38 35 0:31 / /tmp rw shared:24 - tmpfs tmpfs rw,seclabel
|
||||
39 17 0:32 / /dev/hugepages rw,relatime shared:25 - hugetlbfs hugetlbfs rw,seclabel
|
||||
40 16 0:7 / /sys/kernel/debug rw,relatime shared:26 - debugfs debugfs rw
|
||||
41 16 0:33 / /sys/kernel/config rw,relatime shared:27 - configfs configfs rw
|
||||
42 35 0:34 / /var/lib/nfs/rpc_pipefs rw,relatime shared:28 - rpc_pipefs sunrpc rw
|
||||
43 15 0:35 / /proc/fs/nfsd rw,relatime shared:29 - nfsd sunrpc rw
|
||||
45 35 8:17 / /boot rw,relatime shared:30 - ext4 /dev/sdb1 rw,seclabel,data=ordered
|
||||
46 35 253:4 / /home rw,relatime shared:31 - ext4 /dev/mapper/ssd-home rw,seclabel,data=ordered
|
||||
47 35 253:5 / /var/lib/libvirt/images rw,noatime,nodiratime shared:32 - ext4 /dev/mapper/ssd-virt rw,seclabel,discard,data=ordered
|
||||
48 35 253:12 / /mnt/old rw,relatime shared:33 - ext4 /dev/mapper/HelpDeskRHEL6-FedoraRoot rw,seclabel,data=ordered
|
||||
121 22 0:36 / /run/user/1000/gvfs rw,nosuid,nodev,relatime shared:104 - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000
|
||||
124 16 0:37 / /sys/fs/fuse/connections rw,relatime shared:107 - fusectl fusectl rw
|
||||
165 38 253:3 / /tmp/mnt rw,relatime shared:147 - ext4 /dev/mapper/ssd-root rw,seclabel,data=ordered
|
||||
167 35 253:15 / /var/lib/docker/devicemapper/mnt/aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,relatime shared:149 - ext4 /dev/mapper/docker-253:2-425882-aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,seclabel,discard,stripe=16,data=ordered
|
||||
171 35 253:16 / /var/lib/docker/devicemapper/mnt/c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,relatime shared:153 - ext4 /dev/mapper/docker-253:2-425882-c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,seclabel,discard,stripe=16,data=ordered
|
||||
175 35 253:17 / /var/lib/docker/devicemapper/mnt/1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,relatime shared:157 - ext4 /dev/mapper/docker-253:2-425882-1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,seclabel,discard,stripe=16,data=ordered
|
||||
179 35 253:18 / /var/lib/docker/devicemapper/mnt/d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,relatime shared:161 - ext4 /dev/mapper/docker-253:2-425882-d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,seclabel,discard,stripe=16,data=ordered
|
||||
183 35 253:19 / /var/lib/docker/devicemapper/mnt/6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,relatime shared:165 - ext4 /dev/mapper/docker-253:2-425882-6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,seclabel,discard,stripe=16,data=ordered
|
||||
187 35 253:20 / /var/lib/docker/devicemapper/mnt/8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,relatime shared:169 - ext4 /dev/mapper/docker-253:2-425882-8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,seclabel,discard,stripe=16,data=ordered
|
||||
191 35 253:21 / /var/lib/docker/devicemapper/mnt/c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,relatime shared:173 - ext4 /dev/mapper/docker-253:2-425882-c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,seclabel,discard,stripe=16,data=ordered
|
||||
195 35 253:22 / /var/lib/docker/devicemapper/mnt/2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,relatime shared:177 - ext4 /dev/mapper/docker-253:2-425882-2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,seclabel,discard,stripe=16,data=ordered
|
||||
199 35 253:23 / /var/lib/docker/devicemapper/mnt/37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,relatime shared:181 - ext4 /dev/mapper/docker-253:2-425882-37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,seclabel,discard,stripe=16,data=ordered
|
||||
203 35 253:24 / /var/lib/docker/devicemapper/mnt/aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,relatime shared:185 - ext4 /dev/mapper/docker-253:2-425882-aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,seclabel,discard,stripe=16,data=ordered
|
||||
207 35 253:25 / /var/lib/docker/devicemapper/mnt/928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,relatime shared:189 - ext4 /dev/mapper/docker-253:2-425882-928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,seclabel,discard,stripe=16,data=ordered
|
||||
211 35 253:26 / /var/lib/docker/devicemapper/mnt/0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,relatime shared:193 - ext4 /dev/mapper/docker-253:2-425882-0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,seclabel,discard,stripe=16,data=ordered
|
||||
215 35 253:27 / /var/lib/docker/devicemapper/mnt/d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,relatime shared:197 - ext4 /dev/mapper/docker-253:2-425882-d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,seclabel,discard,stripe=16,data=ordered
|
||||
219 35 253:28 / /var/lib/docker/devicemapper/mnt/bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,relatime shared:201 - ext4 /dev/mapper/docker-253:2-425882-bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,seclabel,discard,stripe=16,data=ordered
|
||||
223 35 253:29 / /var/lib/docker/devicemapper/mnt/7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,relatime shared:205 - ext4 /dev/mapper/docker-253:2-425882-7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,seclabel,discard,stripe=16,data=ordered
|
||||
227 35 253:30 / /var/lib/docker/devicemapper/mnt/c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,relatime shared:209 - ext4 /dev/mapper/docker-253:2-425882-c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,seclabel,discard,stripe=16,data=ordered
|
||||
231 35 253:31 / /var/lib/docker/devicemapper/mnt/8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,relatime shared:213 - ext4 /dev/mapper/docker-253:2-425882-8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,seclabel,discard,stripe=16,data=ordered
|
||||
235 35 253:32 / /var/lib/docker/devicemapper/mnt/1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,relatime shared:217 - ext4 /dev/mapper/docker-253:2-425882-1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,seclabel,discard,stripe=16,data=ordered
|
||||
239 35 253:33 / /var/lib/docker/devicemapper/mnt/e9aa60c60128cad1 rw,relatime shared:221 - ext4 /dev/mapper/docker-253:2-425882-e9aa60c60128cad1 rw,seclabel,discard,stripe=16,data=ordered
|
||||
243 35 253:34 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,relatime shared:225 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,seclabel,discard,stripe=16,data=ordered
|
||||
247 35 253:35 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,relatime shared:229 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,seclabel,discard,stripe=16,data=ordered
|
||||
31 21 0:23 / /DATA/foo_bla_bla rw,relatime - cifs //foo/BLA\040BLA\040BLA/ rw,sec=ntlm,cache=loose,unc=\\foo\BLA BLA BLA,username=my_login,domain=mydomain.com,uid=12345678,forceuid,gid=12345678,forcegid,addr=10.1.30.10,file_mode=0755,dir_mode=0755,nounix,rsize=61440,wsize=65536,actimeo=1`
|
||||
|
||||
ubuntuMountInfo = `15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw
|
||||
16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw
|
||||
17 20 0:5 / /dev rw,relatime - devtmpfs udev rw,size=1015140k,nr_inodes=253785,mode=755
|
||||
18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000
|
||||
19 20 0:15 / /run rw,nosuid,noexec,relatime - tmpfs tmpfs rw,size=205044k,mode=755
|
||||
20 1 253:0 / / rw,relatime - ext4 /dev/disk/by-label/DOROOT rw,errors=remount-ro,data=ordered
|
||||
21 15 0:16 / /sys/fs/cgroup rw,relatime - tmpfs none rw,size=4k,mode=755
|
||||
22 15 0:17 / /sys/fs/fuse/connections rw,relatime - fusectl none rw
|
||||
23 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw
|
||||
24 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw
|
||||
25 19 0:18 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k
|
||||
26 21 0:19 / /sys/fs/cgroup/cpuset rw,relatime - cgroup cgroup rw,cpuset,clone_children
|
||||
27 19 0:20 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw
|
||||
28 21 0:21 / /sys/fs/cgroup/cpu rw,relatime - cgroup cgroup rw,cpu
|
||||
29 19 0:22 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755
|
||||
30 15 0:23 / /sys/fs/pstore rw,relatime - pstore none rw
|
||||
31 21 0:24 / /sys/fs/cgroup/cpuacct rw,relatime - cgroup cgroup rw,cpuacct
|
||||
32 21 0:25 / /sys/fs/cgroup/memory rw,relatime - cgroup cgroup rw,memory
|
||||
33 21 0:26 / /sys/fs/cgroup/devices rw,relatime - cgroup cgroup rw,devices
|
||||
34 21 0:27 / /sys/fs/cgroup/freezer rw,relatime - cgroup cgroup rw,freezer
|
||||
35 21 0:28 / /sys/fs/cgroup/blkio rw,relatime - cgroup cgroup rw,blkio
|
||||
36 21 0:29 / /sys/fs/cgroup/perf_event rw,relatime - cgroup cgroup rw,perf_event
|
||||
37 21 0:30 / /sys/fs/cgroup/hugetlb rw,relatime - cgroup cgroup rw,hugetlb
|
||||
38 21 0:31 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup systemd rw,name=systemd
|
||||
39 20 0:32 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=caafa54fdc06525
|
||||
40 20 0:33 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8-init rw,relatime - aufs none rw,si=caafa54f882b525
|
||||
41 20 0:34 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8 rw,relatime - aufs none rw,si=caafa54f8829525
|
||||
42 20 0:35 / /var/lib/docker/aufs/mnt/16f4d7e96dd612903f425bfe856762f291ff2e36a8ecd55a2209b7d7cd81c30b rw,relatime - aufs none rw,si=caafa54f882d525
|
||||
43 20 0:36 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e-init rw,relatime - aufs none rw,si=caafa54f882f525
|
||||
44 20 0:37 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e rw,relatime - aufs none rw,si=caafa54f88ba525
|
||||
45 20 0:38 / /var/lib/docker/aufs/mnt/283f35a910233c756409313be71ecd8fcfef0df57108b8d740b61b3e88860452 rw,relatime - aufs none rw,si=caafa54f88b8525
|
||||
46 20 0:39 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1-init rw,relatime - aufs none rw,si=caafa54f88be525
|
||||
47 20 0:40 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1 rw,relatime - aufs none rw,si=caafa54f882c525
|
||||
48 20 0:41 / /var/lib/docker/aufs/mnt/de2b538c97d6366cc80e8658547c923ea1d042f85580df379846f36a4df7049d rw,relatime - aufs none rw,si=caafa54f85bb525
|
||||
49 20 0:42 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49-init rw,relatime - aufs none rw,si=caafa54fdc00525
|
||||
50 20 0:43 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49 rw,relatime - aufs none rw,si=caafa54fbaec525
|
||||
51 20 0:44 / /var/lib/docker/aufs/mnt/6ac1cace985c9fc9bea32234de8b36dba49bdd5e29a2972b327ff939d78a6274 rw,relatime - aufs none rw,si=caafa54f8e1a525
|
||||
52 20 0:45 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b-init rw,relatime - aufs none rw,si=caafa54f8e1d525
|
||||
53 20 0:46 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b rw,relatime - aufs none rw,si=caafa54f8e1b525
|
||||
54 20 0:47 / /var/lib/docker/aufs/mnt/cabb117d997f0f93519185aea58389a9762770b7496ed0b74a3e4a083fa45902 rw,relatime - aufs none rw,si=caafa54f810a525
|
||||
55 20 0:48 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33-init rw,relatime - aufs none rw,si=caafa54f8529525
|
||||
56 20 0:49 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33 rw,relatime - aufs none rw,si=caafa54f852f525
|
||||
57 20 0:50 / /var/lib/docker/aufs/mnt/16a1526fa445b84ce84f89506d219e87fa488a814063baf045d88b02f21166b3 rw,relatime - aufs none rw,si=caafa54f9e1d525
|
||||
58 20 0:51 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f-init rw,relatime - aufs none rw,si=caafa54f854d525
|
||||
59 20 0:52 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f rw,relatime - aufs none rw,si=caafa54f854e525
|
||||
60 20 0:53 / /var/lib/docker/aufs/mnt/e370c3e286bea027917baa0e4d251262681a472a87056e880dfd0513516dffd9 rw,relatime - aufs none rw,si=caafa54f840a525
|
||||
61 20 0:54 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e-init rw,relatime - aufs none rw,si=caafa54f8408525
|
||||
62 20 0:55 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e rw,relatime - aufs none rw,si=caafa54f8409525
|
||||
63 20 0:56 / /var/lib/docker/aufs/mnt/abd0b5ea5d355a67f911475e271924a5388ee60c27185fcd60d095afc4a09dc7 rw,relatime - aufs none rw,si=caafa54f9eb1525
|
||||
64 20 0:57 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2-init rw,relatime - aufs none rw,si=caafa54f85bf525
|
||||
65 20 0:58 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2 rw,relatime - aufs none rw,si=caafa54f85b8525
|
||||
66 20 0:59 / /var/lib/docker/aufs/mnt/912e1bf28b80a09644503924a8a1a4fb8ed10b808ca847bda27a369919aa52fa rw,relatime - aufs none rw,si=caafa54fbaea525
|
||||
67 20 0:60 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576-init rw,relatime - aufs none rw,si=caafa54f8472525
|
||||
68 20 0:61 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576 rw,relatime - aufs none rw,si=caafa54f8474525
|
||||
69 20 0:62 / /var/lib/docker/aufs/mnt/5aaebb79ef3097dfca377889aeb61a0c9d5e3795117d2b08d0751473c671dfb2 rw,relatime - aufs none rw,si=caafa54f8c5e525
|
||||
70 20 0:63 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2-init rw,relatime - aufs none rw,si=caafa54f8c3b525
|
||||
71 20 0:64 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2 rw,relatime - aufs none rw,si=caafa54f8c3d525
|
||||
72 20 0:65 / /var/lib/docker/aufs/mnt/2777f0763da4de93f8bebbe1595cc77f739806a158657b033eca06f827b6028a rw,relatime - aufs none rw,si=caafa54f8c3e525
|
||||
73 20 0:66 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e-init rw,relatime - aufs none rw,si=caafa54f8c39525
|
||||
74 20 0:67 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e rw,relatime - aufs none rw,si=caafa54f854f525
|
||||
75 20 0:68 / /var/lib/docker/aufs/mnt/06400b526ec18b66639c96efc41a84f4ae0b117cb28dafd56be420651b4084a0 rw,relatime - aufs none rw,si=caafa54f840b525
|
||||
76 20 0:69 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785-init rw,relatime - aufs none rw,si=caafa54fdddf525
|
||||
77 20 0:70 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785 rw,relatime - aufs none rw,si=caafa54f854b525
|
||||
78 20 0:71 / /var/lib/docker/aufs/mnt/1ff414fa93fd61ec81b0ab7b365a841ff6545accae03cceac702833aaeaf718f rw,relatime - aufs none rw,si=caafa54f8d85525
|
||||
79 20 0:72 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8-init rw,relatime - aufs none rw,si=caafa54f8da3525
|
||||
80 20 0:73 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8 rw,relatime - aufs none rw,si=caafa54f8da2525
|
||||
81 20 0:74 / /var/lib/docker/aufs/mnt/b68b1d4fe4d30016c552398e78b379a39f651661d8e1fa5f2460c24a5e723420 rw,relatime - aufs none rw,si=caafa54f8d81525
|
||||
82 20 0:75 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739-init rw,relatime - aufs none rw,si=caafa54f8da1525
|
||||
83 20 0:76 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739 rw,relatime - aufs none rw,si=caafa54f8da0525
|
||||
84 20 0:77 / /var/lib/docker/aufs/mnt/53e10b0329afc0e0d3322d31efaed4064139dc7027fe6ae445cffd7104bcc94f rw,relatime - aufs none rw,si=caafa54f8c35525
|
||||
85 20 0:78 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494-init rw,relatime - aufs none rw,si=caafa54f8db8525
|
||||
86 20 0:79 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494 rw,relatime - aufs none rw,si=caafa54f8dba525
|
||||
87 20 0:80 / /var/lib/docker/aufs/mnt/90fdd2c03eeaf65311f88f4200e18aef6d2772482712d9aea01cd793c64781b5 rw,relatime - aufs none rw,si=caafa54f8315525
88 20 0:81 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f-init rw,relatime - aufs none rw,si=caafa54f8fc6525
89 20 0:82 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f rw,relatime - aufs none rw,si=caafa54f8468525
90 20 0:83 / /var/lib/docker/aufs/mnt/8cf9a993f50f3305abad3da268c0fc44ff78a1e7bba595ef9de963497496c3f9 rw,relatime - aufs none rw,si=caafa54f8c59525
91 20 0:84 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173-init rw,relatime - aufs none rw,si=caafa54f846a525
92 20 0:85 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173 rw,relatime - aufs none rw,si=caafa54f846b525
93 20 0:86 / /var/lib/docker/aufs/mnt/d8c8288ec920439a48b5796bab5883ee47a019240da65e8d8f33400c31bac5df rw,relatime - aufs none rw,si=caafa54f8dbf525
94 20 0:87 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6-init rw,relatime - aufs none rw,si=caafa54f810f525
95 20 0:88 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6 rw,relatime - aufs none rw,si=caafa54fbae9525
96 20 0:89 / /var/lib/docker/aufs/mnt/befc1c67600df449dddbe796c0d06da7caff1d2bbff64cde1f0ba82d224996b5 rw,relatime - aufs none rw,si=caafa54f8dab525
97 20 0:90 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562-init rw,relatime - aufs none rw,si=caafa54fdc02525
98 20 0:91 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562 rw,relatime - aufs none rw,si=caafa54f9eb0525
99 20 0:92 / /var/lib/docker/aufs/mnt/2a31f10029f04ff9d4381167a9b739609853d7220d55a56cb654779a700ee246 rw,relatime - aufs none rw,si=caafa54f8c37525
100 20 0:93 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927-init rw,relatime - aufs none rw,si=caafa54fd173525
101 20 0:94 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927 rw,relatime - aufs none rw,si=caafa54f8108525
102 20 0:95 / /var/lib/docker/aufs/mnt/eaa0f57403a3dc685268f91df3fbcd7a8423cee50e1a9ee5c3e1688d9d676bb4 rw,relatime - aufs none rw,si=caafa54f852d525
103 20 0:96 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b-init rw,relatime - aufs none rw,si=caafa54f8d80525
104 20 0:97 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b rw,relatime - aufs none rw,si=caafa54f8fc3525
105 20 0:98 / /var/lib/docker/aufs/mnt/d1b322ae17613c6adee84e709641a9244ac56675244a89a64dc0075075fcbb83 rw,relatime - aufs none rw,si=caafa54f8c58525
106 20 0:99 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd-init rw,relatime - aufs none rw,si=caafa54f8c63525
107 20 0:100 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd rw,relatime - aufs none rw,si=caafa54f8c67525
108 20 0:101 / /var/lib/docker/aufs/mnt/bc9d2a264158f83a617a069bf17cbbf2a2ba453db7d3951d9dc63cc1558b1c2b rw,relatime - aufs none rw,si=caafa54f8dbe525
109 20 0:102 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99-init rw,relatime - aufs none rw,si=caafa54f9e0d525
110 20 0:103 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99 rw,relatime - aufs none rw,si=caafa54f9e1b525
111 20 0:104 / /var/lib/docker/aufs/mnt/d4dca7b02569c732e740071e1c654d4ad282de5c41edb619af1f0aafa618be26 rw,relatime - aufs none rw,si=caafa54f8dae525
112 20 0:105 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7-init rw,relatime - aufs none rw,si=caafa54f8c5c525
113 20 0:106 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7 rw,relatime - aufs none rw,si=caafa54fd172525
114 20 0:107 / /var/lib/docker/aufs/mnt/e60c57499c0b198a6734f77f660cdbbd950a5b78aa23f470ca4f0cfcc376abef rw,relatime - aufs none rw,si=caafa54909c4525
115 20 0:108 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35-init rw,relatime - aufs none rw,si=caafa54909c3525
116 20 0:109 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35 rw,relatime - aufs none rw,si=caafa54909c7525
117 20 0:110 / /var/lib/docker/aufs/mnt/2997be666d58b9e71469759bcb8bd9608dad0e533a1a7570a896919ba3388825 rw,relatime - aufs none rw,si=caafa54f8557525
118 20 0:111 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93-init rw,relatime - aufs none rw,si=caafa54c6e88525
119 20 0:112 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93 rw,relatime - aufs none rw,si=caafa54c6e8e525
120 20 0:113 / /var/lib/docker/aufs/mnt/a672a1e2f2f051f6e19ed1dfbe80860a2d774174c49f7c476695f5dd1d5b2f67 rw,relatime - aufs none rw,si=caafa54c6e15525
121 20 0:114 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420-init rw,relatime - aufs none rw,si=caafa54f8dad525
122 20 0:115 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420 rw,relatime - aufs none rw,si=caafa54f8d84525
123 20 0:116 / /var/lib/docker/aufs/mnt/2abc86007aca46fb4a817a033e2a05ccacae40b78ea4b03f8ea616b9ada40e2e rw,relatime - aufs none rw,si=caafa54c6e8b525
124 20 0:117 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374-init rw,relatime - aufs none rw,si=caafa54c6e8d525
125 20 0:118 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374 rw,relatime - aufs none rw,si=caafa54f8c34525
126 20 0:119 / /var/lib/docker/aufs/mnt/2f95ca1a629cea8363b829faa727dd52896d5561f2c96ddee4f697ea2fc872c2 rw,relatime - aufs none rw,si=caafa54c6e8a525
127 20 0:120 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2-init rw,relatime - aufs none rw,si=caafa54f8e19525
128 20 0:121 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2 rw,relatime - aufs none rw,si=caafa54fa8c6525
129 20 0:122 / /var/lib/docker/aufs/mnt/c1d04dfdf8cccb3676d5a91e84e9b0781ce40623d127d038bcfbe4c761b27401 rw,relatime - aufs none rw,si=caafa54f8c30525
130 20 0:123 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a-init rw,relatime - aufs none rw,si=caafa54c6e1a525
131 20 0:124 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a rw,relatime - aufs none rw,si=caafa54c6e1c525
132 20 0:125 / /var/lib/docker/aufs/mnt/5ae3b6fccb1539fc02d420e86f3e9637bef5b711fed2ca31a2f426c8f5deddbf rw,relatime - aufs none rw,si=caafa54c4fea525
133 20 0:126 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0-init rw,relatime - aufs none rw,si=caafa54c6e1e525
134 20 0:127 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0 rw,relatime - aufs none rw,si=caafa54fa8c0525
135 20 0:128 / /var/lib/docker/aufs/mnt/f382bd5aaccaf2d04a59089ac7cb12ec87efd769fd0c14d623358fbfd2a3f896 rw,relatime - aufs none rw,si=caafa54c4fec525
136 20 0:129 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735-init rw,relatime - aufs none rw,si=caafa54c4fef525
137 20 0:130 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735 rw,relatime - aufs none rw,si=caafa54c4feb525
138 20 0:131 / /var/lib/docker/aufs/mnt/a9c5ee0854dc083b6bf62b7eb1e5291aefbb10702289a446471ce73aba0d5d7d rw,relatime - aufs none rw,si=caafa54909c6525
139 20 0:134 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0-init rw,relatime - aufs none rw,si=caafa54804fe525
140 20 0:135 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0 rw,relatime - aufs none rw,si=caafa54804fa525
141 20 0:136 / /var/lib/docker/aufs/mnt/7ec3277e5c04c907051caf9c9c35889f5fcd6463e5485971b25404566830bb70 rw,relatime - aufs none rw,si=caafa54804f9525
142 20 0:139 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8-init rw,relatime - aufs none rw,si=caafa54c6ef6525
143 20 0:140 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8 rw,relatime - aufs none rw,si=caafa54c6ef5525
144 20 0:356 / /var/lib/docker/aufs/mnt/e6ecde9e2c18cd3c75f424c67b6d89685cfee0fc67abf2cb6bdc0867eb998026 rw,relatime - aufs none rw,si=caafa548068e525`
gentooMountinfo = `15 1 8:6 / / rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered
16 15 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw
17 15 0:14 / /run rw,nosuid,nodev,relatime - tmpfs tmpfs rw,size=3292172k,mode=755
18 15 0:5 / /dev rw,nosuid,relatime - devtmpfs udev rw,size=10240k,nr_inodes=4106451,mode=755
19 18 0:12 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw
20 18 0:10 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000
21 18 0:15 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw
22 15 0:16 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw
23 22 0:7 / /sys/kernel/debug rw,nosuid,nodev,noexec,relatime - debugfs debugfs rw
24 22 0:17 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs cgroup_root rw,size=10240k,mode=755
25 24 0:18 / /sys/fs/cgroup/openrc rw,nosuid,nodev,noexec,relatime - cgroup openrc rw,release_agent=/lib64/rc/sh/cgroup-release-agent.sh,name=openrc
26 24 0:19 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime - cgroup cpuset rw,cpuset,clone_children
27 24 0:20 / /sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime - cgroup cpu rw,cpu,clone_children
28 24 0:21 / /sys/fs/cgroup/cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cpuacct rw,cpuacct,clone_children
29 24 0:22 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime - cgroup memory rw,memory,clone_children
30 24 0:23 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime - cgroup devices rw,devices,clone_children
31 24 0:24 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime - cgroup freezer rw,freezer,clone_children
32 24 0:25 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime - cgroup blkio rw,blkio,clone_children
33 15 8:1 / /boot rw,noatime,nodiratime - vfat /dev/sda1 rw,fmask=0022,dmask=0022,codepage=437,iocharset=iso8859-1,shortname=mixed,errors=remount-ro
34 15 8:18 / /mnt/xfs rw,noatime,nodiratime - xfs /dev/sdb2 rw,attr2,inode64,noquota
35 15 0:26 / /tmp rw,relatime - tmpfs tmpfs rw
36 16 0:27 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime - binfmt_misc binfmt_misc rw
42 15 0:33 / /var/lib/nfs/rpc_pipefs rw,relatime - rpc_pipefs rpc_pipefs rw
43 16 0:34 / /proc/fs/nfsd rw,nosuid,nodev,noexec,relatime - nfsd nfsd rw
44 15 0:35 / /home/tianon/.gvfs rw,nosuid,nodev,relatime - fuse.gvfs-fuse-daemon gvfs-fuse-daemon rw,user_id=1000,group_id=1000
68 15 0:3336 / /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd rw,relatime - aufs none rw,si=9b4a7640128db39c
86 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/config.env /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerenv rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered
87 68 8:6 /etc/resolv.conf /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/resolv.conf rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered
88 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hostname /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hostname rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered
89 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hosts /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hosts rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered
38 15 0:3384 / /var/lib/docker/aufs/mnt/0292005a9292401bb5197657f2b682d97d8edcb3b72b5e390d2a680139985b55 rw,relatime - aufs none rw,si=9b4a7642b584939c
39 15 0:3385 / /var/lib/docker/aufs/mnt/59db98c889de5f71b70cfb82c40cbe47b64332f0f56042a2987a9e5df6e5e3aa rw,relatime - aufs none rw,si=9b4a7642b584e39c
40 15 0:3386 / /var/lib/docker/aufs/mnt/0545f0f2b6548eb9601d08f35a08f5a0a385407d36027a28f58e06e9f61e0278 rw,relatime - aufs none rw,si=9b4a7642b584b39c
41 15 0:3387 / /var/lib/docker/aufs/mnt/d882cfa16d1aa8fe0331a36e79be3d80b151e49f24fc39a39c3fed1735d5feb5 rw,relatime - aufs none rw,si=9b4a76453040039c
45 15 0:3388 / /var/lib/docker/aufs/mnt/055ca3befcb1626e74f5344b3398724ff05c0de0e20021683d04305c9e70a3f6 rw,relatime - aufs none rw,si=9b4a76453040739c
46 15 0:3389 / /var/lib/docker/aufs/mnt/b899e4567a351745d4285e7f1c18fdece75d877deb3041981cd290be348b7aa6 rw,relatime - aufs none rw,si=9b4a7647def4039c
47 15 0:3390 / /var/lib/docker/aufs/mnt/067ca040292c58954c5129f953219accfae0d40faca26b4d05e76ca76a998f16 rw,relatime - aufs none rw,si=9b4a7647def4239c
48 15 0:3391 / /var/lib/docker/aufs/mnt/8c995e7cb6e5082742daeea720e340b021d288d25d92e0412c03d200df308a11 rw,relatime - aufs none rw,si=9b4a764479c1639c
49 15 0:3392 / /var/lib/docker/aufs/mnt/07cc54dfae5b45300efdacdd53cc72c01b9044956a86ce7bff42d087e426096d rw,relatime - aufs none rw,si=9b4a764479c1739c
50 15 0:3393 / /var/lib/docker/aufs/mnt/0a9c95cf4c589c05b06baa79150b0cc1d8e7102759fe3ce4afaabb8247ca4f85 rw,relatime - aufs none rw,si=9b4a7644059c839c
51 15 0:3394 / /var/lib/docker/aufs/mnt/468fa98cececcf4e226e8370f18f4f848d63faf287fb8321a07f73086441a3a0 rw,relatime - aufs none rw,si=9b4a7644059ca39c
52 15 0:3395 / /var/lib/docker/aufs/mnt/0b826192231c5ce066fffb5beff4397337b5fc19a377aa7c6282c7c0ce7f111f rw,relatime - aufs none rw,si=9b4a764479c1339c
53 15 0:3396 / /var/lib/docker/aufs/mnt/93b8ba1b772fbe79709b909c43ea4b2c30d712e53548f467db1ffdc7a384f196 rw,relatime - aufs none rw,si=9b4a7640798a739c
54 15 0:3397 / /var/lib/docker/aufs/mnt/0c0d0acfb506859b12ef18cdfef9ebed0b43a611482403564224bde9149d373c rw,relatime - aufs none rw,si=9b4a7640798a039c
55 15 0:3398 / /var/lib/docker/aufs/mnt/33648c39ab6c7c74af0243d6d6a81b052e9e25ad1e04b19892eb2dde013e358b rw,relatime - aufs none rw,si=9b4a7644b439b39c
56 15 0:3399 / /var/lib/docker/aufs/mnt/0c12bea97a1c958a3c739fb148536c1c89351d48e885ecda8f0499b5cc44407e rw,relatime - aufs none rw,si=9b4a7640798a239c
57 15 0:3400 / /var/lib/docker/aufs/mnt/ed443988ce125f172d7512e84a4de2627405990fd767a16adefa8ce700c19ce8 rw,relatime - aufs none rw,si=9b4a7644c8ed339c
59 15 0:3402 / /var/lib/docker/aufs/mnt/f61612c324ff3c924d3f7a82fb00a0f8d8f73c248c41897061949e9f5ab7e3b1 rw,relatime - aufs none rw,si=9b4a76442810c39c
60 15 0:3403 / /var/lib/docker/aufs/mnt/0f1ee55c6c4e25027b80de8e64b8b6fb542b3b41aa0caab9261da75752e22bfd rw,relatime - aufs none rw,si=9b4a76442810e39c
61 15 0:3404 / /var/lib/docker/aufs/mnt/956f6cc4af5785cb3ee6963dcbca668219437d9b28f513290b1453ac64a34f97 rw,relatime - aufs none rw,si=9b4a7644303ec39c
62 15 0:3405 / /var/lib/docker/aufs/mnt/1099769158c4b4773e2569e38024e8717e400f87a002c41d8cf47cb81b051ba6 rw,relatime - aufs none rw,si=9b4a7644303ee39c
63 15 0:3406 / /var/lib/docker/aufs/mnt/11890ceb98d4442595b676085cd7b21550ab85c5df841e0fba997ff54e3d522d rw,relatime - aufs none rw,si=9b4a7644303ed39c
64 15 0:3407 / /var/lib/docker/aufs/mnt/acdb90dc378e8ed2420b43a6d291f1c789a081cd1904018780cc038fcd7aae53 rw,relatime - aufs none rw,si=9b4a76434be2139c
65 15 0:3408 / /var/lib/docker/aufs/mnt/120e716f19d4714fbe63cc1ed246204f2c1106eefebc6537ba2587d7e7711959 rw,relatime - aufs none rw,si=9b4a76434be2339c
66 15 0:3409 / /var/lib/docker/aufs/mnt/b197b7fffb61d89e0ba1c40de9a9fc0d912e778b3c1bd828cf981ff37c1963bc rw,relatime - aufs none rw,si=9b4a76434be2039c
70 15 0:3412 / /var/lib/docker/aufs/mnt/1434b69d2e1bb18a9f0b96b9cdac30132b2688f5d1379f68a39a5e120c2f93eb rw,relatime - aufs none rw,si=9b4a76434be2639c
71 15 0:3413 / /var/lib/docker/aufs/mnt/16006e83caf33ab5eb0cd6afc92ea2ee8edeff897496b0bb3ec3a75b767374b3 rw,relatime - aufs none rw,si=9b4a7644d790439c
72 15 0:3414 / /var/lib/docker/aufs/mnt/55bfa5f44e94d27f91f79ba901b118b15098449165c87abf1b53ffff147ff164 rw,relatime - aufs none rw,si=9b4a7644d790239c
73 15 0:3415 / /var/lib/docker/aufs/mnt/1912b97a07ab21ccd98a2a27bc779bf3cf364a3138afa3c3e6f7f169a3c3eab5 rw,relatime - aufs none rw,si=9b4a76441822739c
76 15 0:3418 / /var/lib/docker/aufs/mnt/1a7c3292e8879bd91ffd9282e954f643b1db5683093574c248ff14a9609f2f56 rw,relatime - aufs none rw,si=9b4a76438cb7239c
77 15 0:3419 / /var/lib/docker/aufs/mnt/bb1faaf0d076ddba82c2318305a85f490dafa4e8a8640a8db8ed657c439120cc rw,relatime - aufs none rw,si=9b4a76438cb7339c
78 15 0:3420 / /var/lib/docker/aufs/mnt/1ab869f21d2241a73ac840c7f988490313f909ac642eba71d092204fec66dd7c rw,relatime - aufs none rw,si=9b4a76438cb7639c
79 15 0:3421 / /var/lib/docker/aufs/mnt/fd7245b2cfe3890fa5f5b452260e4edf9e7fb7746532ed9d83f7a0d7dbaa610e rw,relatime - aufs none rw,si=9b4a7644bdc0139c
80 15 0:3422 / /var/lib/docker/aufs/mnt/1e5686c5301f26b9b3cd24e322c608913465cc6c5d0dcd7c5e498d1314747d61 rw,relatime - aufs none rw,si=9b4a7644bdc0639c
81 15 0:3423 / /var/lib/docker/aufs/mnt/52edf6ee6e40bfec1e9301a4d4a92ab83d144e2ae4ce5099e99df6138cb844bf rw,relatime - aufs none rw,si=9b4a7644bdc0239c
82 15 0:3424 / /var/lib/docker/aufs/mnt/1ea10fb7085d28cda4904657dff0454e52598d28e1d77e4f2965bbc3666e808f rw,relatime - aufs none rw,si=9b4a76438cb7139c
83 15 0:3425 / /var/lib/docker/aufs/mnt/9c03e98c3593946dbd4087f8d83f9ca262f4a2efdc952ce60690838b9ba6c526 rw,relatime - aufs none rw,si=9b4a76443020639c
84 15 0:3426 / /var/lib/docker/aufs/mnt/220a2344d67437602c6d2cee9a98c46be13f82c2a8063919dd2fad52bf2fb7dd rw,relatime - aufs none rw,si=9b4a76434bff339c
94 15 0:3427 / /var/lib/docker/aufs/mnt/3b32876c5b200312c50baa476ff342248e88c8ea96e6a1032cd53a88738a1cf2 rw,relatime - aufs none rw,si=9b4a76434bff139c
95 15 0:3428 / /var/lib/docker/aufs/mnt/23ee2b8b0d4ae8db6f6d1e168e2c6f79f8a18f953b09f65e0d22cc1e67a3a6fa rw,relatime - aufs none rw,si=9b4a7646c305c39c
96 15 0:3429 / /var/lib/docker/aufs/mnt/e86e6daa70b61b57945fa178222615f3c3d6bcef12c9f28e9f8623d44dc2d429 rw,relatime - aufs none rw,si=9b4a7646c305f39c
97 15 0:3430 / /var/lib/docker/aufs/mnt/2413d07623e80860bb2e9e306fbdee699afd07525785c025c591231e864aa162 rw,relatime - aufs none rw,si=9b4a76434bff039c
98 15 0:3431 / /var/lib/docker/aufs/mnt/adfd622eb22340fc80b429e5564b125668e260bf9068096c46dd59f1386a4b7d rw,relatime - aufs none rw,si=9b4a7646a7a1039c
102 15 0:3435 / /var/lib/docker/aufs/mnt/27cd92e7a91d02e2d6b44d16679a00fb6d169b19b88822891084e7fd1a84882d rw,relatime - aufs none rw,si=9b4a7646f25ec39c
103 15 0:3436 / /var/lib/docker/aufs/mnt/27dfdaf94cfbf45055c748293c37dd68d9140240bff4c646cb09216015914a88 rw,relatime - aufs none rw,si=9b4a7646732f939c
104 15 0:3437 / /var/lib/docker/aufs/mnt/5ed7524aff68dfbf0fc601cbaeac01bab14391850a973dabf3653282a627920f rw,relatime - aufs none rw,si=9b4a7646732f839c
105 15 0:3438 / /var/lib/docker/aufs/mnt/2a0d4767e536beb5785b60e071e3ac8e5e812613ab143a9627bee77d0c9ab062 rw,relatime - aufs none rw,si=9b4a7646732fe39c
106 15 0:3439 / /var/lib/docker/aufs/mnt/dea3fc045d9f4ae51ba952450b948a822cf85c39411489ca5224f6d9a8d02bad rw,relatime - aufs none rw,si=9b4a764012ad839c
107 15 0:3440 / /var/lib/docker/aufs/mnt/2d140a787160798da60cb67c21b1210054ad4dafecdcf832f015995b9aa99cfd rw,relatime - aufs none rw,si=9b4a764012add39c
108 15 0:3441 / /var/lib/docker/aufs/mnt/cb190b2a8e984475914430fbad2382e0d20b9b659f8ef83ae8d170cc672e519c rw,relatime - aufs none rw,si=9b4a76454d9c239c
109 15 0:3442 / /var/lib/docker/aufs/mnt/2f4a012d5a7ffd90256a6e9aa479054b3dddbc3c6a343f26dafbf3196890223b rw,relatime - aufs none rw,si=9b4a76454d9c439c
110 15 0:3443 / /var/lib/docker/aufs/mnt/63cc77904b80c4ffbf49cb974c5d8733dc52ad7640d3ae87554b325d7312d87f rw,relatime - aufs none rw,si=9b4a76454d9c339c
111 15 0:3444 / /var/lib/docker/aufs/mnt/30333e872c451482ea2d235ff2192e875bd234006b238ae2bdde3b91a86d7522 rw,relatime - aufs none rw,si=9b4a76422cebf39c
112 15 0:3445 / /var/lib/docker/aufs/mnt/6c54fc1125da3925cae65b5c9a98f3be55b0a2c2666082e5094a4ba71beb5bff rw,relatime - aufs none rw,si=9b4a7646dd5a439c
113 15 0:3446 / /var/lib/docker/aufs/mnt/3087d48cb01cda9d0a83a9ca301e6ea40e8593d18c4921be4794c91a420ab9a3 rw,relatime - aufs none rw,si=9b4a7646dd5a739c
114 15 0:3447 / /var/lib/docker/aufs/mnt/cc2607462a8f55b179a749b144c3fdbb50678e1a4f3065ea04e283e9b1f1d8e2 rw,relatime - aufs none rw,si=9b4a7646dd5a239c
117 15 0:3450 / /var/lib/docker/aufs/mnt/310c5e8392b29e8658a22e08d96d63936633b7e2c38e8d220047928b00a03d24 rw,relatime - aufs none rw,si=9b4a7647932d739c
118 15 0:3451 / /var/lib/docker/aufs/mnt/38a1f0029406ba9c3b6058f2f406d8a1d23c855046cf355c91d87d446fcc1460 rw,relatime - aufs none rw,si=9b4a76445abc939c
119 15 0:3452 / /var/lib/docker/aufs/mnt/42e109ab7914ae997a11ccd860fd18e4d488c50c044c3240423ce15774b8b62e rw,relatime - aufs none rw,si=9b4a76445abca39c
120 15 0:3453 / /var/lib/docker/aufs/mnt/365d832af0402d052b389c1e9c0d353b48487533d20cd4351df8e24ec4e4f9d8 rw,relatime - aufs none rw,si=9b4a7644066aa39c
121 15 0:3454 / /var/lib/docker/aufs/mnt/d3fa8a24d695b6cda9b64f96188f701963d28bef0473343f8b212df1a2cf1d2b rw,relatime - aufs none rw,si=9b4a7644066af39c
122 15 0:3455 / /var/lib/docker/aufs/mnt/37d4f491919abc49a15d0c7a7cc8383f087573525d7d288accd14f0b4af9eae0 rw,relatime - aufs none rw,si=9b4a7644066ad39c
123 15 0:3456 / /var/lib/docker/aufs/mnt/93902707fe12cbdd0068ce73f2baad4b3a299189b1b19cb5f8a2025e106ae3f5 rw,relatime - aufs none rw,si=9b4a76444445f39c
126 15 0:3459 / /var/lib/docker/aufs/mnt/3b49291670a625b9bbb329ffba99bf7fa7abff80cefef040f8b89e2b3aad4f9f rw,relatime - aufs none rw,si=9b4a7640798a339c
127 15 0:3460 / /var/lib/docker/aufs/mnt/8d9c7b943cc8f854f4d0d4ec19f7c16c13b0cc4f67a41472a072648610cecb59 rw,relatime - aufs none rw,si=9b4a76427383039c
128 15 0:3461 / /var/lib/docker/aufs/mnt/3b6c90036526c376307df71d49c9f5fce334c01b926faa6a78186842de74beac rw,relatime - aufs none rw,si=9b4a7644badd439c
130 15 0:3463 / /var/lib/docker/aufs/mnt/7b24158eeddfb5d31b7e932e406ea4899fd728344335ff8e0765e89ddeb351dd rw,relatime - aufs none rw,si=9b4a7644badd539c
131 15 0:3464 / /var/lib/docker/aufs/mnt/3ead6dd5773765c74850cf6c769f21fe65c29d622ffa712664f9f5b80364ce27 rw,relatime - aufs none rw,si=9b4a7642f469939c
132 15 0:3465 / /var/lib/docker/aufs/mnt/3f825573b29547744a37b65597a9d6d15a8350be4429b7038d126a4c9a8e178f rw,relatime - aufs none rw,si=9b4a7642f469c39c
133 15 0:3466 / /var/lib/docker/aufs/mnt/f67aaaeb3681e5dcb99a41f847087370bd1c206680cb8c7b6a9819fd6c97a331 rw,relatime - aufs none rw,si=9b4a7647cc25939c
134 15 0:3467 / /var/lib/docker/aufs/mnt/41afe6cfb3c1fc2280b869db07699da88552786e28793f0bc048a265c01bd942 rw,relatime - aufs none rw,si=9b4a7647cc25c39c
135 15 0:3468 / /var/lib/docker/aufs/mnt/b8092ea59da34a40b120e8718c3ae9fa8436996edc4fc50e4b99c72dfd81e1af rw,relatime - aufs none rw,si=9b4a76445abc439c
136 15 0:3469 / /var/lib/docker/aufs/mnt/42c69d2cc179e2684458bb8596a9da6dad182c08eae9b74d5f0e615b399f75a5 rw,relatime - aufs none rw,si=9b4a76455ddbe39c
137 15 0:3470 / /var/lib/docker/aufs/mnt/ea0871954acd2d62a211ac60e05969622044d4c74597870c4f818fbb0c56b09b rw,relatime - aufs none rw,si=9b4a76455ddbf39c
138 15 0:3471 / /var/lib/docker/aufs/mnt/4307906b275ab3fc971786b3841ae3217ac85b6756ddeb7ad4ba09cd044c2597 rw,relatime - aufs none rw,si=9b4a76455ddb839c
139 15 0:3472 / /var/lib/docker/aufs/mnt/4390b872928c53500a5035634f3421622ed6299dc1472b631fc45de9f56dc180 rw,relatime - aufs none rw,si=9b4a76402f2fd39c
140 15 0:3473 / /var/lib/docker/aufs/mnt/6bb41e78863b85e4aa7da89455314855c8c3bda64e52a583bab15dc1fa2e80c2 rw,relatime - aufs none rw,si=9b4a76402f2fa39c
141 15 0:3474 / /var/lib/docker/aufs/mnt/4444f583c2a79c66608f4673a32c9c812154f027045fbd558c2d69920c53f835 rw,relatime - aufs none rw,si=9b4a764479dbd39c
142 15 0:3475 / /var/lib/docker/aufs/mnt/6f11883af4a05ea362e0c54df89058da4859f977efd07b6f539e1f55c1d2a668 rw,relatime - aufs none rw,si=9b4a76402f30b39c
143 15 0:3476 / /var/lib/docker/aufs/mnt/453490dd32e7c2e9ef906f995d8fb3c2753923d1a5e0ba3fd3296e2e4dc238e7 rw,relatime - aufs none rw,si=9b4a76402f30c39c
144 15 0:3477 / /var/lib/docker/aufs/mnt/45e5945735ee102b5e891c91650c57ec4b52bb53017d68f02d50ea8a6e230610 rw,relatime - aufs none rw,si=9b4a76423260739c
147 15 0:3480 / /var/lib/docker/aufs/mnt/4727a64a5553a1125f315b96bed10d3073d6988225a292cce732617c925b56ab rw,relatime - aufs none rw,si=9b4a76443030339c
150 15 0:3483 / /var/lib/docker/aufs/mnt/4e348b5187b9a567059306afc72d42e0ec5c893b0d4abd547526d5f9b6fb4590 rw,relatime - aufs none rw,si=9b4a7644f5d8c39c
151 15 0:3484 / /var/lib/docker/aufs/mnt/4efc616bfbc3f906718b052da22e4335f8e9f91ee9b15866ed3a8029645189ef rw,relatime - aufs none rw,si=9b4a7644f5d8939c
152 15 0:3485 / /var/lib/docker/aufs/mnt/83e730ae9754d5adb853b64735472d98dfa17136b8812ac9cfcd1eba7f4e7d2d rw,relatime - aufs none rw,si=9b4a76469aa7139c
153 15 0:3486 / /var/lib/docker/aufs/mnt/4fc5ba8a5b333be2b7eefacccb626772eeec0ae8a6975112b56c9fb36c0d342f rw,relatime - aufs none rw,si=9b4a7640128dc39c
154 15 0:3487 / /var/lib/docker/aufs/mnt/50200d5edff5dfe8d1ef3c78b0bbd709793ac6e936aa16d74ff66f7ea577b6f9 rw,relatime - aufs none rw,si=9b4a7640128da39c
155 15 0:3488 / /var/lib/docker/aufs/mnt/51e5e51604361448f0b9777f38329f414bc5ba9cf238f26d465ff479bd574b61 rw,relatime - aufs none rw,si=9b4a76444f68939c
156 15 0:3489 / /var/lib/docker/aufs/mnt/52a142149aa98bba83df8766bbb1c629a97b9799944ead90dd206c4bdf0b8385 rw,relatime - aufs none rw,si=9b4a76444f68b39c
157 15 0:3490 / /var/lib/docker/aufs/mnt/52dd21a94a00f58a1ed489312fcfffb91578089c76c5650364476f1d5de031bc rw,relatime - aufs none rw,si=9b4a76444f68f39c
158 15 0:3491 / /var/lib/docker/aufs/mnt/ee562415ddaad353ed22c88d0ca768a0c74bfba6333b6e25c46849ee22d990da rw,relatime - aufs none rw,si=9b4a7640128d839c
159 15 0:3492 / /var/lib/docker/aufs/mnt/db47a9e87173f7554f550c8a01891de79cf12acdd32e01f95c1a527a08bdfb2c rw,relatime - aufs none rw,si=9b4a764405a1d39c
160 15 0:3493 / /var/lib/docker/aufs/mnt/55e827bf6d44d930ec0b827c98356eb8b68c3301e2d60d1429aa72e05b4c17df rw,relatime - aufs none rw,si=9b4a764405a1a39c
162 15 0:3495 / /var/lib/docker/aufs/mnt/578dc4e0a87fc37ec081ca098430499a59639c09f6f12a8f48de29828a091aa6 rw,relatime - aufs none rw,si=9b4a76406d7d439c
163 15 0:3496 / /var/lib/docker/aufs/mnt/728cc1cb04fa4bc6f7bf7a90980beda6d8fc0beb71630874c0747b994efb0798 rw,relatime - aufs none rw,si=9b4a76444f20e39c
164 15 0:3497 / /var/lib/docker/aufs/mnt/5850cc4bd9b55aea46c7ad598f1785117607974084ea643580f58ce3222e683a rw,relatime - aufs none rw,si=9b4a7644a824239c
165 15 0:3498 / /var/lib/docker/aufs/mnt/89443b3f766d5a37bc8b84e29da8b84e6a3ea8486d3cf154e2aae1816516e4a8 rw,relatime - aufs none rw,si=9b4a7644a824139c
166 15 0:3499 / /var/lib/docker/aufs/mnt/f5ae8fd5a41a337907d16515bc3162525154b59c32314c695ecd092c3b47943d rw,relatime - aufs none rw,si=9b4a7644a824439c
167 15 0:3500 / /var/lib/docker/aufs/mnt/5a430854f2a03a9e5f7cbc9f3fb46a8ebca526a5b3f435236d8295e5998798f5 rw,relatime - aufs none rw,si=9b4a7647fc82439c
168 15 0:3501 / /var/lib/docker/aufs/mnt/eda16901ae4cead35070c39845cbf1e10bd6b8cb0ffa7879ae2d8a186e460f91 rw,relatime - aufs none rw,si=9b4a76441e0df39c
169 15 0:3502 / /var/lib/docker/aufs/mnt/5a593721430c2a51b119ff86a7e06ea2b37e3b4131f8f1344d402b61b0c8d868 rw,relatime - aufs none rw,si=9b4a764248bad39c
170 15 0:3503 / /var/lib/docker/aufs/mnt/d662ad0a30fbfa902e0962108685b9330597e1ee2abb16dc9462eb5a67fdd23f rw,relatime - aufs none rw,si=9b4a764248bae39c
171 15 0:3504 / /var/lib/docker/aufs/mnt/5bc9de5c79812843fb36eee96bef1ddba812407861f572e33242f4ee10da2c15 rw,relatime - aufs none rw,si=9b4a764248ba839c
172 15 0:3505 / /var/lib/docker/aufs/mnt/5e763de8e9b0f7d58d2e12a341e029ab4efb3b99788b175090d8209e971156c1 rw,relatime - aufs none rw,si=9b4a764248baa39c
173 15 0:3506 / /var/lib/docker/aufs/mnt/b4431dc2739936f1df6387e337f5a0c99cf051900c896bd7fd46a870ce61c873 rw,relatime - aufs none rw,si=9b4a76401263539c
174 15 0:3507 / /var/lib/docker/aufs/mnt/5f37830e5a02561ab8c67ea3113137ba69f67a60e41c05cb0e7a0edaa1925b24 rw,relatime - aufs none rw,si=9b4a76401263639c
184 15 0:3508 / /var/lib/docker/aufs/mnt/62ea10b957e6533538a4633a1e1d678502f50ddcdd354b2ca275c54dd7a7793a rw,relatime - aufs none rw,si=9b4a76401263039c
187 15 0:3509 / /var/lib/docker/aufs/mnt/d56ee9d44195fe390e042fda75ec15af5132adb6d5c69468fa8792f4e54a6953 rw,relatime - aufs none rw,si=9b4a76401263239c
188 15 0:3510 / /var/lib/docker/aufs/mnt/6a300930673174549c2b62f36c933f0332a20735978c007c805a301f897146c5 rw,relatime - aufs none rw,si=9b4a76455d4c539c
189 15 0:3511 / /var/lib/docker/aufs/mnt/64496c45c84d348c24d410015456d101601c30cab4d1998c395591caf7e57a70 rw,relatime - aufs none rw,si=9b4a76455d4c639c
190 15 0:3512 / /var/lib/docker/aufs/mnt/65a6a645883fe97a7422cd5e71ebe0bc17c8e6302a5361edf52e89747387e908 rw,relatime - aufs none rw,si=9b4a76455d4c039c
191 15 0:3513 / /var/lib/docker/aufs/mnt/672be40695f7b6e13b0a3ed9fc996c73727dede3481f58155950fcfad57ed616 rw,relatime - aufs none rw,si=9b4a76455d4c239c
192 15 0:3514 / /var/lib/docker/aufs/mnt/d42438acb2bfb2169e1c0d8e917fc824f7c85d336dadb0b0af36dfe0f001b3ba rw,relatime - aufs none rw,si=9b4a7642bfded39c
193 15 0:3515 / /var/lib/docker/aufs/mnt/b48a54abf26d01cb2ddd908b1ed6034d17397c1341bf0eb2b251a3e5b79be854 rw,relatime - aufs none rw,si=9b4a7642bfdee39c
194 15 0:3516 / /var/lib/docker/aufs/mnt/76f27134491f052bfb87f59092126e53ef875d6851990e59195a9da16a9412f8 rw,relatime - aufs none rw,si=9b4a7642bfde839c
195 15 0:3517 / /var/lib/docker/aufs/mnt/6bd626a5462b4f8a8e1cc7d10351326dca97a59b2758e5ea549a4f6350ce8a90 rw,relatime - aufs none rw,si=9b4a7642bfdea39c
196 15 0:3518 / /var/lib/docker/aufs/mnt/f1fe3549dbd6f5ca615e9139d9b53f0c83a3b825565df37628eacc13e70cbd6d rw,relatime - aufs none rw,si=9b4a7642bfdf539c
197 15 0:3519 / /var/lib/docker/aufs/mnt/6d0458c8426a9e93d58d0625737e6122e725c9408488ed9e3e649a9984e15c34 rw,relatime - aufs none rw,si=9b4a7642bfdf639c
198 15 0:3520 / /var/lib/docker/aufs/mnt/6e4c97db83aa82145c9cf2bafc20d500c0b5389643b689e3ae84188c270a48c5 rw,relatime - aufs none rw,si=9b4a7642bfdf039c
199 15 0:3521 / /var/lib/docker/aufs/mnt/eb94d6498f2c5969eaa9fa11ac2934f1ab90ef88e2d002258dca08e5ba74ea27 rw,relatime - aufs none rw,si=9b4a7642bfdf239c
200 15 0:3522 / /var/lib/docker/aufs/mnt/fe3f88f0c511608a2eec5f13a98703aa16e55dbf930309723d8a37101f539fe1 rw,relatime - aufs none rw,si=9b4a7642bfc3539c
201 15 0:3523 / /var/lib/docker/aufs/mnt/6f40c229fb9cad85fabf4b64a2640a5403ec03fe5ac1a57d0609fb8b606b9c83 rw,relatime - aufs none rw,si=9b4a7642bfc3639c
202 15 0:3524 / /var/lib/docker/aufs/mnt/7513e9131f7a8acf58ff15248237feb767c78732ca46e159f4d791e6ef031dbc rw,relatime - aufs none rw,si=9b4a7642bfc3039c
203 15 0:3525 / /var/lib/docker/aufs/mnt/79f48b00aa713cdf809c6bb7c7cb911b66e9a8076c81d6c9d2504139984ea2da rw,relatime - aufs none rw,si=9b4a7642bfc3239c
204 15 0:3526 / /var/lib/docker/aufs/mnt/c3680418350d11358f0a96c676bc5aa74fa00a7c89e629ef5909d3557b060300 rw,relatime - aufs none rw,si=9b4a7642f47cd39c
205 15 0:3527 / /var/lib/docker/aufs/mnt/7a1744dd350d7fcc0cccb6f1757ca4cbe5453f203a5888b0f1014d96ad5a5ef9 rw,relatime - aufs none rw,si=9b4a7642f47ce39c
206 15 0:3528 / /var/lib/docker/aufs/mnt/7fa99662db046be9f03c33c35251afda9ccdc0085636bbba1d90592cec3ff68d rw,relatime - aufs none rw,si=9b4a7642f47c839c
207 15 0:3529 / /var/lib/docker/aufs/mnt/f815021ef20da9c9b056bd1d52d8aaf6e2c0c19f11122fc793eb2b04eb995e35 rw,relatime - aufs none rw,si=9b4a7642f47ca39c
208 15 0:3530 / /var/lib/docker/aufs/mnt/801086ae3110192d601dfcebdba2db92e86ce6b6a9dba6678ea04488e4513669 rw,relatime - aufs none rw,si=9b4a7642dc6dd39c
209 15 0:3531 / /var/lib/docker/aufs/mnt/822ba7db69f21daddda87c01cfbfbf73013fc03a879daf96d16cdde6f9b1fbd6 rw,relatime - aufs none rw,si=9b4a7642dc6de39c
210 15 0:3532 / /var/lib/docker/aufs/mnt/834227c1a950fef8cae3827489129d0dd220541e60c6b731caaa765bf2e6a199 rw,relatime - aufs none rw,si=9b4a7642dc6d839c
211 15 0:3533 / /var/lib/docker/aufs/mnt/83dccbc385299bd1c7cf19326e791b33a544eea7b4cdfb6db70ea94eed4389fb rw,relatime - aufs none rw,si=9b4a7642dc6da39c
212 15 0:3534 / /var/lib/docker/aufs/mnt/f1b8e6f0e7c8928b5dcdab944db89306ebcae3e0b32f9ff40d2daa8329f21600 rw,relatime - aufs none rw,si=9b4a7645a126039c
213 15 0:3535 / /var/lib/docker/aufs/mnt/970efb262c7a020c2404cbcc5b3259efba0d110a786079faeef05bc2952abf3a rw,relatime - aufs none rw,si=9b4a7644c8ed139c
214 15 0:3536 / /var/lib/docker/aufs/mnt/84b6d73af7450f3117a77e15a5ca1255871fea6182cd8e8a7be6bc744be18c2c rw,relatime - aufs none rw,si=9b4a76406559139c
215 15 0:3537 / /var/lib/docker/aufs/mnt/88be2716e026bc681b5e63fe7942068773efbd0b6e901ca7ba441412006a96b6 rw,relatime - aufs none rw,si=9b4a76406559339c
216 15 0:3538 / /var/lib/docker/aufs/mnt/c81939aa166ce50cd8bca5cfbbcc420a78e0318dd5cd7c755209b9166a00a752 rw,relatime - aufs none rw,si=9b4a76406559239c
217 15 0:3539 / /var/lib/docker/aufs/mnt/e0f241645d64b7dc5ff6a8414087cca226be08fb54ce987d1d1f6350c57083aa rw,relatime - aufs none rw,si=9b4a7647cfc0f39c
218 15 0:3540 / /var/lib/docker/aufs/mnt/e10e2bf75234ed51d8a6a4bb39e465404fecbe318e54400d3879cdb2b0679c78 rw,relatime - aufs none rw,si=9b4a7647cfc0939c
219 15 0:3541 / /var/lib/docker/aufs/mnt/8f71d74c8cfc3228b82564aa9f09b2e576cff0083ddfb6aa5cb350346063f080 rw,relatime - aufs none rw,si=9b4a7647cfc0a39c
220 15 0:3542 / /var/lib/docker/aufs/mnt/9159f1eba2aef7f5205cc18d015cda7f5933cd29bba3b1b8aed5ccb5824c69ee rw,relatime - aufs none rw,si=9b4a76468cedd39c
221 15 0:3543 / /var/lib/docker/aufs/mnt/932cad71e652e048e500d9fbb5b8ea4fc9a269d42a3134ce527ceef42a2be56b rw,relatime - aufs none rw,si=9b4a76468cede39c
222 15 0:3544 / /var/lib/docker/aufs/mnt/bf1e1b5f529e8943cc0144ee86dbaaa37885c1ddffcef29537e0078ee7dd316a rw,relatime - aufs none rw,si=9b4a76468ced839c
223 15 0:3545 / /var/lib/docker/aufs/mnt/949d93ecf3322e09f858ce81d5f4b434068ec44ff84c375de03104f7b45ee955 rw,relatime - aufs none rw,si=9b4a76468ceda39c
224 15 0:3546 / /var/lib/docker/aufs/mnt/d65c6087f92dc2a3841b5251d2fe9ca07d4c6e5b021597692479740816e4e2a1 rw,relatime - aufs none rw,si=9b4a7645a126239c
225 15 0:3547 / /var/lib/docker/aufs/mnt/98a0153119d0651c193d053d254f6e16a68345a141baa80c87ae487e9d33f290 rw,relatime - aufs none rw,si=9b4a7640787cf39c
226 15 0:3548 / /var/lib/docker/aufs/mnt/99daf7fe5847c017392f6e59aa9706b3dfdd9e6d1ba11dae0f7fffde0a60b5e5 rw,relatime - aufs none rw,si=9b4a7640787c839c
227 15 0:3549 / /var/lib/docker/aufs/mnt/9ad1f2fe8a5599d4e10c5a6effa7f03d932d4e92ee13149031a372087a359079 rw,relatime - aufs none rw,si=9b4a7640787ca39c
228 15 0:3550 / /var/lib/docker/aufs/mnt/c26d64494da782ddac26f8370d86ac93e7c1666d88a7b99110fc86b35ea6a85d rw,relatime - aufs none rw,si=9b4a7642fc6b539c
229 15 0:3551 / /var/lib/docker/aufs/mnt/a49e4a8275133c230ec640997f35f172312eb0ea5bd2bbe10abf34aae98f30eb rw,relatime - aufs none rw,si=9b4a7642fc6b639c
230 15 0:3552 / /var/lib/docker/aufs/mnt/b5e2740c867ed843025f49d84e8d769de9e8e6039b3c8cb0735b5bf358994bc7 rw,relatime - aufs none rw,si=9b4a7642fc6b039c
231 15 0:3553 / /var/lib/docker/aufs/mnt/a826fdcf3a7039b30570054579b65763db605a314275d7aef31b872c13311b4b rw,relatime - aufs none rw,si=9b4a7642fc6b239c
232 15 0:3554 / /var/lib/docker/aufs/mnt/addf3025babf5e43b5a3f4a0da7ad863dda3c01fb8365c58fd8d28bb61dc11bc rw,relatime - aufs none rw,si=9b4a76407871d39c
233 15 0:3555 / /var/lib/docker/aufs/mnt/c5b6c6813ab3e5ebdc6d22cb2a3d3106a62095f2c298be52b07a3b0fa20ff690 rw,relatime - aufs none rw,si=9b4a76407871e39c
234 15 0:3556 / /var/lib/docker/aufs/mnt/af0609eaaf64e2392060cb46f5a9f3d681a219bb4c651d4f015bf573fbe6c4cf rw,relatime - aufs none rw,si=9b4a76407871839c
235 15 0:3557 / /var/lib/docker/aufs/mnt/e7f20e3c37ecad39cd90a97cd3549466d0d106ce4f0a930b8495442634fa4a1f rw,relatime - aufs none rw,si=9b4a76407871a39c
237 15 0:3559 / /var/lib/docker/aufs/mnt/b57a53d440ffd0c1295804fa68cdde35d2fed5409484627e71b9c37e4249fd5c rw,relatime - aufs none rw,si=9b4a76444445a39c
238 15 0:3560 / /var/lib/docker/aufs/mnt/b5e7d7b8f35e47efbba3d80c5d722f5e7bd43e54c824e54b4a4b351714d36d42 rw,relatime - aufs none rw,si=9b4a7647932d439c
239 15 0:3561 / /var/lib/docker/aufs/mnt/f1b136def157e9465640658f277f3347de593c6ae76412a2e79f7002f091cae2 rw,relatime - aufs none rw,si=9b4a76445abcd39c
240 15 0:3562 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=9b4a7644403b339c
241 15 0:3563 / /var/lib/docker/aufs/mnt/b89b140cdbc95063761864e0a23346207fa27ee4c5c63a1ae85c9069a9d9cf1d rw,relatime - aufs none rw,si=9b4a7644aa19739c
242 15 0:3564 / /var/lib/docker/aufs/mnt/bc6a69ed51c07f5228f6b4f161c892e6a949c0e7e86a9c3432049d4c0e5cd298 rw,relatime - aufs none rw,si=9b4a7644aa19139c
243 15 0:3565 / /var/lib/docker/aufs/mnt/be4e2ba3f136933e239f7cf3d136f484fb9004f1fbdfee24a62a2c7b0ab30670 rw,relatime - aufs none rw,si=9b4a7644aa19339c
244 15 0:3566 / /var/lib/docker/aufs/mnt/e04ca1a4a5171e30d20f0c92f90a50b8b6f8600af5459c4b4fb25e42e864dfe1 rw,relatime - aufs none rw,si=9b4a7647932d139c
245 15 0:3567 / /var/lib/docker/aufs/mnt/be61576b31db893129aaffcd3dcb5ce35e49c4b71b30c392a78609a45c7323d8 rw,relatime - aufs none rw,si=9b4a7642d85f739c
246 15 0:3568 / /var/lib/docker/aufs/mnt/dda42c191e56becf672327658ab84fcb563322db3764b91c2fefe4aaef04c624 rw,relatime - aufs none rw,si=9b4a7642d85f139c
247 15 0:3569 / /var/lib/docker/aufs/mnt/c0a7995053330f3d88969247a2e72b07e2dd692133f5668a4a35ea3905561072 rw,relatime - aufs none rw,si=9b4a7642d85f339c
249 15 0:3571 / /var/lib/docker/aufs/mnt/c3594b2e5f08c59ff5ed338a1ba1eceeeb1f7fc5d180068338110c00b1eb8502 rw,relatime - aufs none rw,si=9b4a7642738c739c
250 15 0:3572 / /var/lib/docker/aufs/mnt/c58dce03a0ab0a7588393880379dc3bce9f96ec08ed3f99cf1555260ff0031e8 rw,relatime - aufs none rw,si=9b4a7642738c139c
251 15 0:3573 / /var/lib/docker/aufs/mnt/c73e9f1d109c9d14cb36e1c7489df85649be3911116d76c2fd3648ec8fd94e23 rw,relatime - aufs none rw,si=9b4a7642738c339c
252 15 0:3574 / /var/lib/docker/aufs/mnt/c9eef28c344877cd68aa09e543c0710ab2b305a0ff96dbb859bfa7808c3e8d01 rw,relatime - aufs none rw,si=9b4a7642d85f439c
253 15 0:3575 / /var/lib/docker/aufs/mnt/feb67148f548d70cb7484f2aaad2a86051cd6867a561741a2f13b552457d666e rw,relatime - aufs none rw,si=9b4a76468c55739c
254 15 0:3576 / /var/lib/docker/aufs/mnt/cdf1f96c36d35a96041a896bf398ec0f7dc3b0fb0643612a0f4b6ff96e04e1bb rw,relatime - aufs none rw,si=9b4a76468c55139c
255 15 0:3577 / /var/lib/docker/aufs/mnt/ec6e505872353268451ac4bc034c1df00f3bae4a3ea2261c6e48f7bd5417c1b3 rw,relatime - aufs none rw,si=9b4a76468c55339c
256 15 0:3578 / /var/lib/docker/aufs/mnt/d6dc8aca64efd90e0bc10274001882d0efb310d42ccbf5712b99b169053b8b1a rw,relatime - aufs none rw,si=9b4a7642738c439c
257 15 0:3579 / /var/lib/docker/aufs/mnt/d712594e2ff6eaeb895bfd150d694bd1305fb927e7a186b2dab7df2ea95f8f81 rw,relatime - aufs none rw,si=9b4a76401268f39c
259 15 0:3581 / /var/lib/docker/aufs/mnt/dbfa1174cd78cde2d7410eae442af0b416c4a0e6f87ed4ff1e9f169a0029abc0 rw,relatime - aufs none rw,si=9b4a76401268b39c
260 15 0:3582 / /var/lib/docker/aufs/mnt/e883f5a82316d7856fbe93ee8c0af5a920b7079619dd95c4ffd88bbd309d28dd rw,relatime - aufs none rw,si=9b4a76468c55439c
261 15 0:3583 / /var/lib/docker/aufs/mnt/fdec3eff581c4fc2b09f87befa2fa021f3f2d373bea636a87f1fb5b367d6347a rw,relatime - aufs none rw,si=9b4a7644aa1af39c
262 15 0:3584 / /var/lib/docker/aufs/mnt/ef764e26712184653067ecf7afea18a80854c41331ca0f0ef03e1bacf90a6ffc rw,relatime - aufs none rw,si=9b4a7644aa1a939c
263 15 0:3585 / /var/lib/docker/aufs/mnt/f3176b40c41fce8ce6942936359a2001a6f1b5c1bb40ee224186db0789ec2f76 rw,relatime - aufs none rw,si=9b4a7644aa1ab39c
264 15 0:3586 / /var/lib/docker/aufs/mnt/f5daf06785d3565c6dd18ea7d953d9a8b9606107781e63270fe0514508736e6a rw,relatime - aufs none rw,si=9b4a76401268c39c
58 15 0:3587 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8-init rw,relatime - aufs none rw,si=9b4a76444445839c
67 15 0:3588 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8 rw,relatime - aufs none rw,si=9b4a7644badd339c
265 15 0:3610 / /var/lib/docker/aufs/mnt/e812472cd2c8c4748d1ef71fac4e77e50d661b9349abe66ce3e23511ed44f414 rw,relatime - aufs none rw,si=9b4a76427937d39c
270 15 0:3615 / /var/lib/docker/aufs/mnt/997636e7c5c9d0d1376a217e295c14c205350b62bc12052804fb5f90abe6f183 rw,relatime - aufs none rw,si=9b4a76406540739c
273 15 0:3618 / /var/lib/docker/aufs/mnt/d5794d080417b6e52e69227c3873e0e4c1ff0d5a845ebe3860ec2f89a47a2a1e rw,relatime - aufs none rw,si=9b4a76454814039c
278 15 0:3623 / /var/lib/docker/aufs/mnt/586bdd48baced671bb19bc4d294ec325f26c55545ae267db426424f157d59c48 rw,relatime - aufs none rw,si=9b4a7644b439f39c
281 15 0:3626 / /var/lib/docker/aufs/mnt/69739d022f89f8586908bbd5edbbdd95ea5256356f177f9ffcc6ef9c0ea752d2 rw,relatime - aufs none rw,si=9b4a7644a0f1b39c
286 15 0:3631 / /var/lib/docker/aufs/mnt/ff28c27d5f894363993622de26d5dd352dba072f219e4691d6498c19bbbc15a9 rw,relatime - aufs none rw,si=9b4a7642265b339c
289 15 0:3634 / /var/lib/docker/aufs/mnt/aa128fe0e64fdede333aa48fd9de39530c91a9244a0f0649a3c411c61e372daa rw,relatime - aufs none rw,si=9b4a764012ada39c
99 15 8:33 / /media/REMOVE\040ME rw,nosuid,nodev,relatime - fuseblk /dev/sdc1 rw,user_id=0,group_id=0,allow_other,blksize=4096`
)
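
// The fixtures above are verbatim /proc/self/mountinfo captures. Per proc(5),
// each line carries: mount ID, parent ID, major:minor of the device, the root
// of the mount within its filesystem, the mount point, mount options, zero or
// more optional fields terminated by a "-" separator, the filesystem type,
// the mount source, and the super-block options. As a sketch of the mapping
// (this sample line is reconstructed from the Info literal asserted in
// TestParseFedoraMountinfoFields below, not quoted from a fixture):
//
//	15 35 0:3 / /proc rw,nosuid,nodev,noexec,relatime shared:5 - proc proc rw
//
// parses to ID=15, Parent=35, Major=0, Minor=3, Root="/", Mountpoint="/proc",
// Options="rw,nosuid,nodev,noexec,relatime", Optional="shared:5",
// FSType="proc", Source="proc", VFSOptions="rw".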

func TestParseFedoraMountinfo(t *testing.T) {
	r := bytes.NewBuffer([]byte(fedoraMountinfo))
	_, err := parseInfoFile(r)
	if err != nil {
		t.Fatal(err)
	}
}

func TestParseUbuntuMountinfo(t *testing.T) {
	r := bytes.NewBuffer([]byte(ubuntuMountInfo))
	_, err := parseInfoFile(r)
	if err != nil {
		t.Fatal(err)
	}
}

func TestParseGentooMountinfo(t *testing.T) {
	r := bytes.NewBuffer([]byte(gentooMountinfo))
	_, err := parseInfoFile(r)
	if err != nil {
		t.Fatal(err)
	}
}

func TestParseFedoraMountinfoFields(t *testing.T) {
	r := bytes.NewBuffer([]byte(fedoraMountinfo))
	infos, err := parseInfoFile(r)
	if err != nil {
		t.Fatal(err)
	}
	expectedLength := 58
	if len(infos) != expectedLength {
		t.Fatalf("Expected %d entries, got %d", expectedLength, len(infos))
	}
	mi := Info{
		ID:         15,
		Parent:     35,
		Major:      0,
		Minor:      3,
		Root:       "/",
		Mountpoint: "/proc",
		Options:    "rw,nosuid,nodev,noexec,relatime",
		Optional:   "shared:5",
		FSType:     "proc",
		Source:     "proc",
		VFSOptions: "rw",
	}

	if infos[0] != mi {
		t.Fatalf("expected %#v, got %#v", mi, infos[0])
	}
}
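
// A minimal consolidation sketch (an addition, not part of the original
// suite): the three smoke tests above share the same read-parse-check shape,
// so a table-driven variant can exercise every fixture in one loop.
func TestParseMountinfoFixtures(t *testing.T) {
	fixtures := map[string]string{
		"fedora": fedoraMountinfo,
		"ubuntu": ubuntuMountInfo,
		"gentoo": gentooMountinfo,
	}
	for name, data := range fixtures {
		// Each fixture must parse without error, as in the individual tests.
		if _, err := parseInfoFile(bytes.NewBuffer([]byte(data))); err != nil {
			t.Fatalf("%s: parsing mountinfo fixture failed: %v", name, err)
		}
	}
}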