update vendor

Signed-off-by: Jess Frazelle <acidburn@microsoft.com>
Jess Frazelle 2018-09-25 12:27:46 -04:00
parent 19a32db84d
commit 94d1cfbfbf
GPG key ID: 18F3685C0022BFF3
10501 changed files with 2307943 additions and 29279 deletions


@ -1,8 +1,6 @@
language: go
go:
- 1.9.3
- 1.9.4
- tip
- "1.10.x"
go_import_path: github.com/containerd/console
@ -19,8 +17,3 @@ script:
- GOOS=solaris go build
- GOOS=solaris go test -c
- GOOS=windows go test
matrix:
allow_failures:
- go: 1.9.4


@ -72,7 +72,7 @@ func NewEpoller() (*Epoller, error) {
}, nil
}
// Add creates a epoll console based on the provided console. The console will
// Add creates an epoll console based on the provided console. The console will
// be registered with EPOLLET (i.e. using edge-triggered notification) and its
// file descriptor will be set to non-blocking mode. After this, user should use
// the return console to perform I/O.
@ -134,7 +134,7 @@ func (e *Epoller) Wait() error {
}
}
// Close unregister the console's file descriptor from epoll interface
// CloseConsole unregisters the console's file descriptor from epoll interface
func (e *Epoller) CloseConsole(fd int) error {
e.mu.Lock()
defer e.mu.Unlock()
@ -149,12 +149,12 @@ func (e *Epoller) getConsole(sysfd int) *EpollConsole {
return f
}
// Close the epoll fd
// Close closes the epoll fd
func (e *Epoller) Close() error {
return unix.Close(e.efd)
}
// EpollConsole acts like a console but register its file descriptor with a
// EpollConsole acts like a console but registers its file descriptor with an
// epoll fd and uses epoll API to perform I/O.
type EpollConsole struct {
Console
@ -167,7 +167,7 @@ type EpollConsole struct {
// Read reads up to len(p) bytes into p. It returns the number of bytes read
// (0 <= n <= len(p)) and any error encountered.
//
// If the console's read returns EAGAIN or EIO, we assumes that its a
// If the console's read returns EAGAIN or EIO, we assume that it's a
// temporary error because the other side went away and wait for the signal
// generated by epoll event to continue.
func (ec *EpollConsole) Read(p []byte) (n int, err error) {
@ -207,7 +207,7 @@ func (ec *EpollConsole) Read(p []byte) (n int, err error) {
// written from p (0 <= n <= len(p)) and any error encountered that caused
// the write to stop early.
//
// If writes to the console returns EAGAIN or EIO, we assumes that its a
// If writes to the console returns EAGAIN or EIO, we assume that it's a
// temporary error because the other side went away and wait for the signal
// generated by epoll event to continue.
func (ec *EpollConsole) Write(p []byte) (n int, err error) {
@ -224,7 +224,7 @@ func (ec *EpollConsole) Write(p []byte) (n int, err error) {
} else {
hangup = (err == unix.EAGAIN || err == unix.EIO)
}
// if the other end disappear, assume this is temporary and wait for the
// if the other end disappears, assume this is temporary and wait for the
// signal to continue again.
if hangup {
ec.writec.Wait()
@ -242,7 +242,7 @@ func (ec *EpollConsole) Write(p []byte) (n int, err error) {
return n, err
}
// Close closed the file descriptor and signal call waiters for this fd.
// Shutdown closes the file descriptor and signals call waiters for this fd.
// It accepts a callback which will be called with the console's fd. The
// callback typically will be used to do further cleanup such as unregister the
// console's fd from the epoll interface.
@ -262,10 +262,14 @@ func (ec *EpollConsole) Shutdown(close func(int) error) error {
// signalRead signals that the console is readable.
func (ec *EpollConsole) signalRead() {
ec.readc.L.Lock()
ec.readc.Signal()
ec.readc.L.Unlock()
}
// signalWrite signals that the console is writable.
func (ec *EpollConsole) signalWrite() {
ec.writec.L.Lock()
ec.writec.Signal()
ec.writec.L.Unlock()
}
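
For orientation, here is a minimal Linux-only sketch of how the Epoller/EpollConsole API touched in this hunk fits together. `console.Current`, `NewEpoller`, `Add`, `Wait`, `Shutdown` and `CloseConsole` are the functions shown or referenced above; error handling and goroutine lifetimes are simplified, so treat this as illustrative rather than canonical:

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/containerd/console"
)

func main() {
	// Grab the current terminal (panics if stdio is not a tty).
	current := console.Current()
	defer current.Reset()

	// Create the epoll-backed poller and register the console with it.
	// Add switches the fd to non-blocking mode and registers it with
	// EPOLLET, as described in the comments above.
	epoller, err := console.NewEpoller()
	if err != nil {
		log.Fatal(err)
	}
	epollConsole, err := epoller.Add(current)
	if err != nil {
		log.Fatal(err)
	}

	// Wait drives the epoll loop and wakes up blocked readers/writers.
	go epoller.Wait()

	// Reads block on the condition variable instead of spinning on EAGAIN.
	go io.Copy(os.Stdout, epollConsole)

	// ... perform I/O with epollConsole ...

	// Shutdown closes the fd and, via the callback, unregisters it from epoll.
	if err := epollConsole.Shutdown(epoller.CloseConsole); err != nil {
		log.Fatal(err)
	}
	epollConsole.Close()
	epoller.Close()
}
```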


@ -150,11 +150,11 @@ func (m *master) Close() error {
}
func (m *master) Read(b []byte) (int, error) {
panic("not implemented on windows")
return os.Stdin.Read(b)
}
func (m *master) Write(b []byte) (int, error) {
panic("not implemented on windows")
return os.Stdout.Write(b)
}
func (m *master) Fd() uintptr {


@ -11,7 +11,8 @@ branches:
environment:
GOPATH: C:\gopath
CGO_ENABLED: 1
GO_VERSION: 1.9
matrix:
- GO_VERSION: 1.10
before_build:
- choco install -y mingw
@ -21,6 +22,8 @@ before_build:
- 7z x go%GO_VERSION%.windows-amd64.zip -oC:\ >nul
- go version
- choco install codecov
# Print host version. TODO: Remove this when containerd has a way to get host version
- ps: $psversiontable
build_script:
- bash.exe -elc "export PATH=/c/tools/mingw64/bin:/c/gopath/bin:$PATH;


@ -0,0 +1,40 @@
<!--
If you are reporting a new issue, make sure that we do not have any duplicates
already open. You can ensure this by searching the issue list for this
repository. If there is a duplicate, please close your issue and add a comment
to the existing issue instead.
If you suspect your issue is a bug, please edit your issue description to
include the BUG REPORT INFORMATION shown below. If you fail to provide this
information within 7 days, we cannot debug your issue and will close it. We
will, however, reopen it if you later provide the information.
---------------------------------------------------
BUG REPORT INFORMATION
---------------------------------------------------
Use the commands below to provide key information from your environment:
You do NOT have to include this information if this is a FEATURE REQUEST
-->
**Description**
<!--
Briefly describe the problem you are having in a few paragraphs.
-->
**Steps to reproduce the issue:**
1.
2.
3.
**Describe the results you received:**
**Describe the results you expected:**
**Output of `containerd --version`:**
```
(paste your output here)
```


@ -1,4 +1,6 @@
/bin/
/man/
coverage.txt
profile.out
containerd.test
_site/


@ -7,13 +7,13 @@
"fetch\\.go:.*::error: unrecognized printf verb 'r'"
],
"EnableGC": true,
"WarnUnmatchedDirective": true,
"Enable": [
"structcheck",
"unused",
"varcheck",
"staticcheck",
"unconvert",
"gofmt",
"goimports",

17 vendor/github.com/containerd/containerd/.mailmap generated vendored Normal file

@ -0,0 +1,17 @@
Abhinandan Prativadi <abhi@docker.com> Abhinandan Prativadi <aprativadi@gmail.com>
Abhinandan Prativadi <abhi@docker.com> abhi <abhi@docker.com>
Akihiro Suda <suda.akihiro@lab.ntt.co.jp> Akihiro Suda <suda.kyoto@gmail.com>
Andrei Vagin <avagin@virtuozzo.com> Andrei Vagin <avagin@openvz.org>
Frank Yang <yyb196@gmail.com> frank yang <yyb196@gmail.com>
Justin Terry <juterry@microsoft.com> Justin Terry (VM) <juterry@microsoft.com>
Justin Terry <juterry@microsoft.com> Justin <jterry75@users.noreply.github.com>
Kenfe-Mickaël Laventure <mickael.laventure@gmail.com> Kenfe-Mickael Laventure <mickael.laventure@gmail.com>
Kevin Xu <cming.xu@gmail.com> kevin.xu <cming.xu@gmail.com>
Lu Jingxiao <lujingxiao@huawei.com> l00397676 <lujingxiao@huawei.com>
Lantao Liu <lantaol@google.com> Lantao Liu <taotaotheripper@gmail.com>
Phil Estes <estesp@gmail.com> Phil Estes <estesp@linux.vnet.ibm.com>
Stephen J Day <stevvooe@gmail.com> Stephen J Day <stephen.day@docker.com>
Stephen J Day <stevvooe@gmail.com> Stephen Day <stevvooe@users.noreply.github.com>
Stephen J Day <stevvooe@gmail.com> Stephen Day <stephen.day@getcruise.com>
Sudeesh John <sudeesh@linux.vnet.ibm.com> sudeesh john <sudeesh@linux.vnet.ibm.com>
Tõnis Tiigi <tonistiigi@gmail.com> Tonis Tiigi <tonistiigi@gmail.com>


@ -0,0 +1 @@
2.4.2


@ -7,7 +7,7 @@ services:
language: go
go:
- 1.9.x
- "1.10.x"
go_import_path: github.com/containerd/containerd
@ -15,8 +15,6 @@ addons:
apt:
packages:
- btrfs-tools
- libseccomp-dev
- libapparmor-dev
- libnl-3-dev
- libnet-dev
- protobuf-c-compiler
@ -26,6 +24,7 @@ addons:
- libaio-dev
- libprotobuf-c0-dev
- libprotobuf-dev
- socat
env:
- TRAVIS_GOOS=linux TRAVIS_CGO_ENABLED=1
@ -33,16 +32,20 @@ env:
before_install:
- uname -r
- sudo apt-get -q update
- sudo apt-get install -y libseccomp-dev/trusty-backports
install:
- wget https://github.com/google/protobuf/releases/download/v3.5.0/protoc-3.5.0-linux-x86_64.zip -O /tmp/protoc-3.5.0-linux-x86_64.zip
- sudo unzip -o -d /usr/local /tmp/protoc-3.5.0-linux-x86_64.zip
- sudo PATH=$PATH GOPATH=$GOPATH script/setup/install-protobuf
- sudo chmod +x /usr/local/bin/protoc
- sudo chmod og+rx /usr/local/include/google /usr/local/include/google/protobuf /usr/local/include/google/protobuf/compiler
- sudo chmod -R og+r /usr/local/include/google/protobuf/
- protoc --version
- go get -u github.com/vbatts/git-validation
- sudo wget https://github.com/crosbymichael/runc/releases/download/ctd-9/runc -O /bin/runc; sudo chmod +x /bin/runc
- go get -u github.com/kunalkushwaha/ltag
- sudo PATH=$PATH GOPATH=$GOPATH script/setup/install-runc
- sudo PATH=$PATH GOPATH=$GOPATH script/setup/install-cni
- sudo PATH=$PATH GOPATH=$GOPATH script/setup/install-critools
- wget https://github.com/xemul/criu/archive/v3.0.tar.gz -O /tmp/criu.tar.gz
- tar -C /tmp/ -zxf /tmp/criu.tar.gz
- cd /tmp/criu-3.0 && sudo make install-criu
@ -51,12 +54,13 @@ install:
script:
- export GOOS=$TRAVIS_GOOS
- export CGO_ENABLED=$TRAVIS_CGO_ENABLED
- GIT_CHECK_EXCLUDE="./vendor" TRAVIS_COMMIT_RANGE="${TRAVIS_COMMIT_RANGE/.../..}" make dco
- DCO_VERBOSITY=-q script/validate/dco
- script/validate/fileheader
- GOOS=linux script/setup/install-dev-tools
- script/validate/vendor
- go build -i .
- make check
- if [ "$GOOS" = "linux" ]; then make check-protos check-api-descriptors; fi
- if [[ "$GOOS" = "linux" && "$TRAVIS_GO_VERSION" != "1.9"* ]]; then make check-protos check-api-descriptors; fi
- make build
- make binaries
- if [ "$GOOS" = "linux" ]; then sudo make install ; fi
@ -65,6 +69,13 @@ script:
- if [ "$GOOS" = "linux" ]; then sudo PATH=$PATH GOPATH=$GOPATH make integration ; fi
# Run the integration suite a second time. See discussion in github.com/containerd/containerd/pull/1759
- if [ "$GOOS" = "linux" ]; then sudo PATH=$PATH GOPATH=$GOPATH TESTFLAGS_PARALLEL=1 make integration ; fi
- if [ "$GOOS" = "linux" ]; then
sudo PATH=$PATH containerd -log-level debug &> /tmp/containerd-cri.log &
sudo ctr version ;
sudo PATH=$PATH GOPATH=$GOPATH critest --runtime-endpoint=/var/run/containerd/containerd.sock --parallel=8 ;
test $? -ne 0 && cat /tmp/containerd-cri.log ;
sudo pkill containerd ;
fi
after_success:
- bash <(curl -s https://codecov.io/bash) -F linux


@ -14,7 +14,7 @@ This doc includes:
To build the `containerd` daemon, and the `ctr` simple test client, the following build system dependencies are required:
* Go 1.9.x or above
* Go 1.10.x or above
* Protoc 3.x compiler and headers (download at the [Google protobuf releases page](https://github.com/google/protobuf/releases))
* Btrfs headers and libraries for your distribution. Note that building the btrfs driver can be disabled via the build tag `no_btrfs`, removing this dependency.
@ -69,8 +69,12 @@ compiler to regenerate the API generated code packages with:
make generate
```
> *Note*: A build tag is currently available to disable building the btrfs snapshot driver.
> Adding `BUILDTAGS=no_btrfs` to your environment before calling the **binaries**
> *Note*: Several build tags are currently available:
> * `no_btrfs`: A build tag disables building the btrfs snapshot driver.
> * `no_cri`: A build tag disables building Kubernetes [CRI](http://blog.kubernetes.io/2016/12/container-runtime-interface-cri-in-kubernetes.html) support into containerd.
> See [here](https://github.com/containerd/cri-containerd#build-tags) for build tags of CRI plugin.
>
> For example, adding `BUILDTAGS=no_btrfs` to your environment before calling the **binaries**
> Makefile target will disable the btrfs driver within the containerd Go build.
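
For example (an illustrative invocation; pick whichever of the tags listed above you actually need), several tags can be combined on the `make` command line:

```console
$ make BUILDTAGS="no_btrfs no_cri" binaries
```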
Vendoring of external imports uses the [`vndr` tool](https://github.com/LK4D4/vndr) which uses a simple config file, `vendor.conf`, to provide the URL and version or hash details for each vendored import. After modifying `vendor.conf` run the `vndr` tool to update the `vendor/` directory contents. Combining the `vendor.conf` update with the changeset in `vendor/` after running `vndr` should become a single commit for a PR which relies on vendored updates.
@ -84,7 +88,7 @@ You can build static binaries by providing a few variables to `make`:
```sudo
make EXTRA_FLAGS="-buildmode pie" \
EXTRA_LDFLAGS='-extldflags "-fno-PIC -static"' \
BUILDTAGS="static_build"
BUILDTAGS="static_build netgo"
```
> *Note*:
@ -138,11 +142,11 @@ We can build an image from this `Dockerfile`:
FROM golang
RUN apt-get update && \
apt-get install -y btrfs-tools libapparmor-dev libseccomp-dev
apt-get install -y btrfs-tools libseccomp-dev
```
In our Docker container we will use a specific `runc` build which includes [seccomp](https://en.wikipedia.org/wiki/seccomp) and [apparmor](https://en.wikipedia.org/wiki/AppArmor) support. Hence why our Dockerfile includes these dependencies: `libapparmor-dev` `libseccomp-dev`. Please refer to [RUNC.md](/RUNC.md) for the currently supported version of `runc` that is used by containerd.
In our Docker container we will use a specific `runc` build which includes [seccomp](https://en.wikipedia.org/wiki/seccomp) and [apparmor](https://en.wikipedia.org/wiki/AppArmor) support. Hence why our Dockerfile includes `libseccomp-dev` as a dependency (apparmor support doesn't require external libraries). Please refer to [RUNC.md](/RUNC.md) for the currently supported version of `runc` that is used by containerd.
Let's suppose you build an image called `containerd/build` from the above Dockerfile. You can run the following command:


@ -63,6 +63,22 @@ a subsequent commit that uses it.
Remember, you're telling part of the story with the commit message. Don't make
your chapter weird.
## Applying License Header to New Files
If you submit a contribution that adds a new file, please add the license
header. You can do so manually or use the `ltag` tool:
```console
$ go get github.com/kunalkushwaha/ltag
$ ltag -t ./script/validate/template
```
The above will add the appropriate license header to Go language source files,
Makefiles, Dockerfiles, and shell scripts. New templates will need to be added
if other kinds of files are added. Please consult the
documentation at https://github.com/kunalkushwaha/ltag
## Sign your work
The sign-off is a simple line at the end of the explanation for the patch. Your

4 vendor/github.com/containerd/containerd/Gemfile generated vendored Normal file

@ -0,0 +1,4 @@
source "https://rubygems.org"
gem "jekyll"
gem "jekyll-redirect-from"
gem "jekyll-seo-tag"

69 vendor/github.com/containerd/containerd/Gemfile.lock generated vendored Normal file

@ -0,0 +1,69 @@
GEM
remote: https://rubygems.org/
specs:
addressable (2.5.2)
public_suffix (>= 2.0.2, < 4.0)
colorator (1.1.0)
concurrent-ruby (1.0.5)
em-websocket (0.5.1)
eventmachine (>= 0.12.9)
http_parser.rb (~> 0.6.0)
eventmachine (1.2.5)
ffi (1.9.18)
forwardable-extended (2.6.0)
http_parser.rb (0.6.0)
i18n (0.9.1)
concurrent-ruby (~> 1.0)
jekyll (3.7.0)
addressable (~> 2.4)
colorator (~> 1.0)
em-websocket (~> 0.5)
i18n (~> 0.7)
jekyll-sass-converter (~> 1.0)
jekyll-watch (~> 2.0)
kramdown (~> 1.14)
liquid (~> 4.0)
mercenary (~> 0.3.3)
pathutil (~> 0.9)
rouge (>= 1.7, < 4)
safe_yaml (~> 1.0)
jekyll-redirect-from (0.13.0)
jekyll (~> 3.3)
jekyll-sass-converter (1.5.1)
sass (~> 3.4)
jekyll-seo-tag (2.4.0)
jekyll (~> 3.3)
jekyll-watch (2.0.0)
listen (~> 3.0)
kramdown (1.16.2)
liquid (4.0.0)
listen (3.1.5)
rb-fsevent (~> 0.9, >= 0.9.4)
rb-inotify (~> 0.9, >= 0.9.7)
ruby_dep (~> 1.2)
mercenary (0.3.6)
pathutil (0.16.1)
forwardable-extended (~> 2.6)
public_suffix (3.0.1)
rb-fsevent (0.10.2)
rb-inotify (0.9.10)
ffi (>= 0.5.0, < 2)
rouge (3.1.0)
ruby_dep (1.5.0)
safe_yaml (1.0.4)
sass (3.5.4)
sass-listen (~> 4.0.0)
sass-listen (4.0.0)
rb-fsevent (~> 0.9, >= 0.9.4)
rb-inotify (~> 0.9, >= 0.9.7)
PLATFORMS
ruby
DEPENDENCIES
jekyll
jekyll-redirect-from
jekyll-seo-tag
BUNDLED WITH
1.16.1


@ -176,7 +176,7 @@
END OF TERMS AND CONDITIONS
Copyright 2013-2016 Docker, Inc.
Copyright The containerd Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@ -1,395 +0,0 @@
Attribution 4.0 International
=======================================================================
Creative Commons Corporation ("Creative Commons") is not a law firm and
does not provide legal services or legal advice. Distribution of
Creative Commons public licenses does not create a lawyer-client or
other relationship. Creative Commons makes its licenses and related
information available on an "as-is" basis. Creative Commons gives no
warranties regarding its licenses, any material licensed under their
terms and conditions, or any related information. Creative Commons
disclaims all liability for damages resulting from their use to the
fullest extent possible.
Using Creative Commons Public Licenses
Creative Commons public licenses provide a standard set of terms and
conditions that creators and other rights holders may use to share
original works of authorship and other material subject to copyright
and certain other rights specified in the public license below. The
following considerations are for informational purposes only, are not
exhaustive, and do not form part of our licenses.
Considerations for licensors: Our public licenses are
intended for use by those authorized to give the public
permission to use material in ways otherwise restricted by
copyright and certain other rights. Our licenses are
irrevocable. Licensors should read and understand the terms
and conditions of the license they choose before applying it.
Licensors should also secure all rights necessary before
applying our licenses so that the public can reuse the
material as expected. Licensors should clearly mark any
material not subject to the license. This includes other CC-
licensed material, or material used under an exception or
limitation to copyright. More considerations for licensors:
wiki.creativecommons.org/Considerations_for_licensors
Considerations for the public: By using one of our public
licenses, a licensor grants the public permission to use the
licensed material under specified terms and conditions. If
the licensor's permission is not necessary for any reason--for
example, because of any applicable exception or limitation to
copyright--then that use is not regulated by the license. Our
licenses grant only permissions under copyright and certain
other rights that a licensor has authority to grant. Use of
the licensed material may still be restricted for other
reasons, including because others have copyright or other
rights in the material. A licensor may make special requests,
such as asking that all changes be marked or described.
Although not required by our licenses, you are encouraged to
respect those requests where reasonable. More_considerations
for the public:
wiki.creativecommons.org/Considerations_for_licensees
=======================================================================
Creative Commons Attribution 4.0 International Public License
By exercising the Licensed Rights (defined below), You accept and agree
to be bound by the terms and conditions of this Creative Commons
Attribution 4.0 International Public License ("Public License"). To the
extent this Public License may be interpreted as a contract, You are
granted the Licensed Rights in consideration of Your acceptance of
these terms and conditions, and the Licensor grants You such rights in
consideration of benefits the Licensor receives from making the
Licensed Material available under these terms and conditions.
Section 1 -- Definitions.
a. Adapted Material means material subject to Copyright and Similar
Rights that is derived from or based upon the Licensed Material
and in which the Licensed Material is translated, altered,
arranged, transformed, or otherwise modified in a manner requiring
permission under the Copyright and Similar Rights held by the
Licensor. For purposes of this Public License, where the Licensed
Material is a musical work, performance, or sound recording,
Adapted Material is always produced where the Licensed Material is
synched in timed relation with a moving image.
b. Adapter's License means the license You apply to Your Copyright
and Similar Rights in Your contributions to Adapted Material in
accordance with the terms and conditions of this Public License.
c. Copyright and Similar Rights means copyright and/or similar rights
closely related to copyright including, without limitation,
performance, broadcast, sound recording, and Sui Generis Database
Rights, without regard to how the rights are labeled or
categorized. For purposes of this Public License, the rights
specified in Section 2(b)(1)-(2) are not Copyright and Similar
Rights.
d. Effective Technological Measures means those measures that, in the
absence of proper authority, may not be circumvented under laws
fulfilling obligations under Article 11 of the WIPO Copyright
Treaty adopted on December 20, 1996, and/or similar international
agreements.
e. Exceptions and Limitations means fair use, fair dealing, and/or
any other exception or limitation to Copyright and Similar Rights
that applies to Your use of the Licensed Material.
f. Licensed Material means the artistic or literary work, database,
or other material to which the Licensor applied this Public
License.
g. Licensed Rights means the rights granted to You subject to the
terms and conditions of this Public License, which are limited to
all Copyright and Similar Rights that apply to Your use of the
Licensed Material and that the Licensor has authority to license.
h. Licensor means the individual(s) or entity(ies) granting rights
under this Public License.
i. Share means to provide material to the public by any means or
process that requires permission under the Licensed Rights, such
as reproduction, public display, public performance, distribution,
dissemination, communication, or importation, and to make material
available to the public including in ways that members of the
public may access the material from a place and at a time
individually chosen by them.
j. Sui Generis Database Rights means rights other than copyright
resulting from Directive 96/9/EC of the European Parliament and of
the Council of 11 March 1996 on the legal protection of databases,
as amended and/or succeeded, as well as other essentially
equivalent rights anywhere in the world.
k. You means the individual or entity exercising the Licensed Rights
under this Public License. Your has a corresponding meaning.
Section 2 -- Scope.
a. License grant.
1. Subject to the terms and conditions of this Public License,
the Licensor hereby grants You a worldwide, royalty-free,
non-sublicensable, non-exclusive, irrevocable license to
exercise the Licensed Rights in the Licensed Material to:
a. reproduce and Share the Licensed Material, in whole or
in part; and
b. produce, reproduce, and Share Adapted Material.
2. Exceptions and Limitations. For the avoidance of doubt, where
Exceptions and Limitations apply to Your use, this Public
License does not apply, and You do not need to comply with
its terms and conditions.
3. Term. The term of this Public License is specified in Section
6(a).
4. Media and formats; technical modifications allowed. The
Licensor authorizes You to exercise the Licensed Rights in
all media and formats whether now known or hereafter created,
and to make technical modifications necessary to do so. The
Licensor waives and/or agrees not to assert any right or
authority to forbid You from making technical modifications
necessary to exercise the Licensed Rights, including
technical modifications necessary to circumvent Effective
Technological Measures. For purposes of this Public License,
simply making modifications authorized by this Section 2(a)
(4) never produces Adapted Material.
5. Downstream recipients.
a. Offer from the Licensor -- Licensed Material. Every
recipient of the Licensed Material automatically
receives an offer from the Licensor to exercise the
Licensed Rights under the terms and conditions of this
Public License.
b. No downstream restrictions. You may not offer or impose
any additional or different terms or conditions on, or
apply any Effective Technological Measures to, the
Licensed Material if doing so restricts exercise of the
Licensed Rights by any recipient of the Licensed
Material.
6. No endorsement. Nothing in this Public License constitutes or
may be construed as permission to assert or imply that You
are, or that Your use of the Licensed Material is, connected
with, or sponsored, endorsed, or granted official status by,
the Licensor or others designated to receive attribution as
provided in Section 3(a)(1)(A)(i).
b. Other rights.
1. Moral rights, such as the right of integrity, are not
licensed under this Public License, nor are publicity,
privacy, and/or other similar personality rights; however, to
the extent possible, the Licensor waives and/or agrees not to
assert any such rights held by the Licensor to the limited
extent necessary to allow You to exercise the Licensed
Rights, but not otherwise.
2. Patent and trademark rights are not licensed under this
Public License.
3. To the extent possible, the Licensor waives any right to
collect royalties from You for the exercise of the Licensed
Rights, whether directly or through a collecting society
under any voluntary or waivable statutory or compulsory
licensing scheme. In all other cases the Licensor expressly
reserves any right to collect such royalties.
Section 3 -- License Conditions.
Your exercise of the Licensed Rights is expressly made subject to the
following conditions.
a. Attribution.
1. If You Share the Licensed Material (including in modified
form), You must:
a. retain the following if it is supplied by the Licensor
with the Licensed Material:
i. identification of the creator(s) of the Licensed
Material and any others designated to receive
attribution, in any reasonable manner requested by
the Licensor (including by pseudonym if
designated);
ii. a copyright notice;
iii. a notice that refers to this Public License;
iv. a notice that refers to the disclaimer of
warranties;
v. a URI or hyperlink to the Licensed Material to the
extent reasonably practicable;
b. indicate if You modified the Licensed Material and
retain an indication of any previous modifications; and
c. indicate the Licensed Material is licensed under this
Public License, and include the text of, or the URI or
hyperlink to, this Public License.
2. You may satisfy the conditions in Section 3(a)(1) in any
reasonable manner based on the medium, means, and context in
which You Share the Licensed Material. For example, it may be
reasonable to satisfy the conditions by providing a URI or
hyperlink to a resource that includes the required
information.
3. If requested by the Licensor, You must remove any of the
information required by Section 3(a)(1)(A) to the extent
reasonably practicable.
4. If You Share Adapted Material You produce, the Adapter's
License You apply must not prevent recipients of the Adapted
Material from complying with this Public License.
Section 4 -- Sui Generis Database Rights.
Where the Licensed Rights include Sui Generis Database Rights that
apply to Your use of the Licensed Material:
a. for the avoidance of doubt, Section 2(a)(1) grants You the right
to extract, reuse, reproduce, and Share all or a substantial
portion of the contents of the database;
b. if You include all or a substantial portion of the database
contents in a database in which You have Sui Generis Database
Rights, then the database in which You have Sui Generis Database
Rights (but not its individual contents) is Adapted Material; and
c. You must comply with the conditions in Section 3(a) if You Share
all or a substantial portion of the contents of the database.
For the avoidance of doubt, this Section 4 supplements and does not
replace Your obligations under this Public License where the Licensed
Rights include other Copyright and Similar Rights.
Section 5 -- Disclaimer of Warranties and Limitation of Liability.
a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
c. The disclaimer of warranties and limitation of liability provided
above shall be interpreted in a manner that, to the extent
possible, most closely approximates an absolute disclaimer and
waiver of all liability.
Section 6 -- Term and Termination.
a. This Public License applies for the term of the Copyright and
Similar Rights licensed here. However, if You fail to comply with
this Public License, then Your rights under this Public License
terminate automatically.
b. Where Your right to use the Licensed Material has terminated under
Section 6(a), it reinstates:
1. automatically as of the date the violation is cured, provided
it is cured within 30 days of Your discovery of the
violation; or
2. upon express reinstatement by the Licensor.
For the avoidance of doubt, this Section 6(b) does not affect any
right the Licensor may have to seek remedies for Your violations
of this Public License.
c. For the avoidance of doubt, the Licensor may also offer the
Licensed Material under separate terms or conditions or stop
distributing the Licensed Material at any time; however, doing so
will not terminate this Public License.
d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
License.
Section 7 -- Other Terms and Conditions.
a. The Licensor shall not be bound by any additional or different
terms or conditions communicated by You unless expressly agreed.
b. Any arrangements, understandings, or agreements regarding the
Licensed Material not stated herein are separate from and
independent of the terms and conditions of this Public License.
Section 8 -- Interpretation.
a. For the avoidance of doubt, this Public License does not, and
shall not be interpreted to, reduce, limit, restrict, or impose
conditions on any use of the Licensed Material that could lawfully
be made without permission under this Public License.
b. To the extent possible, if any provision of this Public License is
deemed unenforceable, it shall be automatically reformed to the
minimum extent necessary to make it enforceable. If the provision
cannot be reformed, it shall be severed from this Public License
without affecting the enforceability of the remaining terms and
conditions.
c. No term or condition of this Public License will be waived and no
failure to comply consented to unless expressly agreed to by the
Licensor.
d. Nothing in this Public License constitutes or may be interpreted
as a limitation upon, or waiver of, any privileges and immunities
that apply to the Licensor or You, including from the legal
processes of any jurisdiction or authority.
=======================================================================
Creative Commons is not a party to its public
licenses. Notwithstanding, Creative Commons may elect to apply one of
its public licenses to material it publishes and in those instances
will be considered the “Licensor.” The text of the Creative Commons
public licenses is dedicated to the public domain under the CC0 Public
Domain Dedication. Except for the limited purpose of indicating that
material is shared under a Creative Commons public license or as
otherwise permitted by the Creative Commons policies published at
creativecommons.org/policies, Creative Commons does not authorize the
use of the trademark "Creative Commons" or any other trademark or logo
of Creative Commons without its prior written consent including,
without limitation, in connection with any unauthorized modifications
to any of its public licenses or any other arrangements,
understandings, or agreements concerning use of licensed material. For
the avoidance of doubt, this paragraph does not form part of the
public licenses.
Creative Commons may be contacted at creativecommons.org.


@ -28,6 +28,19 @@ It's easy to appreciate a really cool and technically advanced feature. It's har
to appreciate the absence of bugs, the slow but steady improvement in stability,
or the reliability of a release process. But those things distinguish a good
project from a great one.
"""
[Rules.reviewer]
title = "What is a reviewer?"
text = """
A reviewer is a core role within the project.
They share in reviewing issues and pull requests and their LGTM count towards the
required LGTM count to merge a code change into the project.
Reviewers are part of the organization but do not have write access.
Becoming a reviewer is a core aspect in the journey to becoming a maintainer.
"""
[Rules.adding-maintainers]
@ -201,6 +214,18 @@ containerd defers to the [Technical Steering Committee](https://github.com/moby/
"jhowardmsft",
"mlaventure",
"stevvooe",
"abhi",
"dchen1107",
"Random-Liu",
"mikebrow",
"yujuhong",
]
[Org.Reviewers]
people = [
"dnephin",
"jessvalarezo",
"yanxuean",
"miaoyq",
]
[People]
@ -249,3 +274,48 @@ containerd defers to the [Technical Steering Committee](https://github.com/moby/
Name = "Stephen Day"
Email = "stephen.day@docker.com"
GitHub = "stevvooe"
[People.jessvalarezo]
Name = "Jessica Valarezo"
Email = "valarezo.jessica@gmail.com"
GitHub = "jessvalarezo"
[People.dnephin]
Name = "Daniel Nephin"
Email = "dnephin@gmail.com"
GitHub = "dnephin"
[people.abhi]
Name = "Abhinandan Prativadi"
Email = "aprativadi@gmail.com"
GitHub = "abhi"
[people.dchen1107]
Name = "Dawn Chen"
Email = "dawnchen@google.com"
GitHub = "dchen1107"
[people.Random-Liu]
Name = "Lantao Liu"
Email = "lantaol@google.com"
GitHub = "Random-Liu"
[people.mikebrow]
Name = "Mike Brown"
Email = "brownwm@us.ibm.com"
GitHub = "mikebrow"
[people.yujuhong]
Name = "Yu-Ju Hong"
Email = "yjhong@google.com"
GitHub = "yujuhong"
[people.yanxuean]
Name = "Xuean Yan"
Email = "yan.xuean@zte.com.cn"
GitHub = "yanxuean"
[people.miaoyq]
Name = "Yanqiang Miao"
Email = "miao.yanqiang@zte.com.cn"
GitHub = "miaoyq"


@ -1,3 +1,18 @@
# Copyright The containerd Authors.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Root directory of the project (absolute path).
ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST))))
@ -12,8 +27,27 @@ ifneq "$(strip $(shell command -v go 2>/dev/null))" ""
GOOS ?= $(shell go env GOOS)
GOARCH ?= $(shell go env GOARCH)
else
GOOS ?= $$GOOS
GOARCH ?= $$GOARCH
ifeq ($(GOOS),)
# approximate GOOS for the platform if we don't have Go and GOOS isn't
# set. We leave GOARCH unset, so that may need to be fixed.
ifeq ($(OS),Windows_NT)
GOOS = windows
else
UNAME_S := $(shell uname -s)
ifeq ($(UNAME_S),Linux)
GOOS = linux
endif
ifeq ($(UNAME_S),Darwin)
GOOS = darwin
endif
ifeq ($(UNAME_S),FreeBSD)
GOOS = freebsd
endif
endif
else
GOOS ?= $$GOOS
GOARCH ?= $$GOARCH
endif
endif
WHALE = "🇩"
@ -38,25 +72,35 @@ TEST_REQUIRES_ROOT_PACKAGES=$(filter \
# Project binaries.
COMMANDS=ctr containerd containerd-stress containerd-release
BINARIES=$(addprefix bin/,$(COMMANDS))
MANPAGES=ctr.1 containerd.1 config.toml.5 containerd-config.1
# Build tags seccomp and apparmor are needed by CRI plugin.
BUILDTAGS ?= seccomp apparmor
GO_TAGS=$(if $(BUILDTAGS),-tags "$(BUILDTAGS)",)
GO_LDFLAGS=-ldflags '-s -w -X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PKG) $(EXTRA_LDFLAGS)'
SHIM_GO_LDFLAGS=-ldflags '-s -w -X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PKG) -extldflags "-static"'
TESTFLAGS_RACE=
GO_GCFLAGS=
#Replaces ":" (*nix), ";" (windows) with newline for easy parsing
GOPATHS=$(shell echo ${GOPATH} | tr ":" "\n" | tr ";" "\n")
TESTFLAGS_RACE=
GO_BUILD_FLAGS=
# See Golang issue re: '-trimpath': https://github.com/golang/go/issues/13809
GO_GCFLAGS=$(shell \
set -- ${GOPATHS}; \
echo "-gcflags=-trimpath=$${1}/src"; \
)
#Detect the target os
include Makefile.OS
#include platform specific makefile
include Makefile.$(target_os)
-include Makefile.$(GOOS)
BINARIES=$(addprefix bin/,$(COMMANDS))
# Flags passed to `go test`
TESTFLAGS ?= -v $(TESTFLAGS_RACE)
TESTFLAGS_PARALLEL ?= 8
.PHONY: clean all AUTHORS fmt vet lint dco build binaries test integration generate protos checkprotos coverage ci check help install uninstall vendor release
.PHONY: clean all AUTHORS build binaries test integration generate protos checkprotos coverage ci check help install uninstall vendor release mandir install-man
.DEFAULT: default
all: binaries
@ -97,17 +141,9 @@ proto-fmt: ## check format of proto files
@test -z "$$(find . -path ./vendor -prune -o -name '*.proto' -type f -exec grep -Hn "Meta meta = " {} \; | grep -v '(gogoproto.nullable) = false' | tee /dev/stderr)" || \
(echo "$(ONI) meta fields in proto files must have option (gogoproto.nullable) = false" && false)
dco: ## dco check
@which git-validation > /dev/null 2>/dev/null || (echo "ERROR: git-validation not found" && false)
ifdef TRAVIS_COMMIT_RANGE
git-validation -q -run DCO,short-subject,dangling-whitespace
else
git-validation -v -run DCO,short-subject,dangling-whitespace -range $(EPOCH_TEST_COMMIT)..HEAD
endif
build: ## build the go packages
@echo "$(WHALE) $@"
@go build ${EXTRA_FLAGS} ${GO_LDFLAGS} ${GO_GCFLAGS} ${PACKAGES}
@go build ${GO_GCFLAGS} ${GO_BUILD_FLAGS} ${EXTRA_FLAGS} ${GO_LDFLAGS} ${PACKAGES}
test: ## run tests, except integration tests and tests that require root
@echo "$(WHALE) $@"
@ -130,15 +166,34 @@ FORCE:
# Build a binary from a cmd.
bin/%: cmd/% FORCE
@echo "$(WHALE) $@${BINARY_SUFFIX}"
@go build -o $@${BINARY_SUFFIX} ${GO_LDFLAGS} ${GO_TAGS} ${GO_GCFLAGS} ./$<
@go build ${GO_GCFLAGS} ${GO_BUILD_FLAGS} -o $@${BINARY_SUFFIX} ${GO_LDFLAGS} ${GO_TAGS} ./$<
bin/containerd-shim: cmd/containerd-shim FORCE # set !cgo and omit pie for a static shim build: https://github.com/golang/go/issues/17789#issuecomment-258542220
@echo "$(WHALE) bin/containerd-shim"
@CGO_ENABLED=0 go build -o bin/containerd-shim ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/containerd-shim
@CGO_ENABLED=0 go build ${GO_BUILD_FLAGS} -o bin/containerd-shim ${SHIM_GO_LDFLAGS} ${GO_TAGS} ./cmd/containerd-shim
binaries: $(BINARIES) ## build binaries
@echo "$(WHALE) $@"
man: mandir $(addprefix man/,$(MANPAGES))
@echo "$(WHALE) $@"
mandir:
@mkdir -p man
man/%: docs/man/%.md FORCE
@echo "$(WHALE) $<"
go-md2man -in "$<" -out "$@"
define installmanpage
mkdir -p $(DESTDIR)/man/man$(2);
gzip -c $(1) >$(DESTDIR)/man/man$(2)/$(3).gz;
endef
install-man:
@echo "$(WHALE) $@"
$(foreach manpage,$(addprefix man/,$(MANPAGES)), $(call installmanpage,$(manpage),$(subst .,,$(suffix $(manpage))),$(notdir $(manpage))))
release: $(BINARIES)
@echo "$(WHALE) $@"
@rm -rf releases/$(RELEASE) releases/$(RELEASE).tar.gz


@ -1,16 +0,0 @@
#Detect the OS, and return installos value same as GOOS
target_os =
ifeq ($(OS),Windows_NT)
target_os = windows
else
UNAME_S := $(shell uname -s)
ifeq ($(UNAME_S),Linux)
target_os = linux
endif
ifeq ($(UNAME_S),Darwin)
target_os = darwin
endif
ifeq ($(UNAME_S),FreeBSD)
target_os = freebsd
endif
endif


@ -1,3 +1,18 @@
# Copyright The containerd Authors.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#darwin specific settings
COMMANDS += containerd-shim


@ -1,3 +1,18 @@
# Copyright The containerd Authors.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#freebsd specific settings
COMMANDS += containerd-shim


@ -1,9 +1,24 @@
# Copyright The containerd Authors.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#linux specific settings
COMMANDS += containerd-shim
# check GOOS for cross compile builds
ifeq ($(GOOS),linux)
GO_GCFLAGS= -buildmode=pie
GO_GCFLAGS += -buildmode=pie
endif
# amd64 supports go test -race


@ -1,3 +1,18 @@
# Copyright The containerd Authors.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#Windows specific settings.
WHALE = "+"
ONI = "-"


@ -12,7 +12,7 @@ plugins = ["grpc", "fieldpath"]
# Paths that should be treated as include roots in relation to the vendor
# directory. These will be calculated with the vendor directory nearest the
# target package.
packages = ["github.com/gogo/protobuf"]
packages = ["github.com/gogo/protobuf", "github.com/gogo/googleapis"]
# Paths that will be added untouched to the end of the includes. We use
# `/usr/local/include` to pickup the common install location of protobuf.
@ -29,7 +29,7 @@ plugins = ["grpc", "fieldpath"]
"google/protobuf/field_mask.proto" = "github.com/gogo/protobuf/types"
"google/protobuf/timestamp.proto" = "github.com/gogo/protobuf/types"
"google/protobuf/duration.proto" = "github.com/gogo/protobuf/types"
"google/rpc/status.proto" = "github.com/containerd/containerd/protobuf/google/rpc"
"google/rpc/status.proto" = "github.com/gogo/googleapis/google/rpc"
[[overrides]]
prefixes = ["github.com/containerd/containerd/api/events"]


@ -4,6 +4,7 @@
[![Build Status](https://travis-ci.org/containerd/containerd.svg?branch=master)](https://travis-ci.org/containerd/containerd)
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcontainerd%2Fcontainerd.svg?type=shield)](https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcontainerd%2Fcontainerd?ref=badge_shield)
[![Go Report Card](https://goreportcard.com/badge/github.com/containerd/containerd)](https://goreportcard.com/report/github.com/containerd/containerd)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/1271/badge)](https://bestpractices.coreinfrastructure.org/projects/1271)
containerd is an industry-standard container runtime with an emphasis on simplicity, robustness and portability. It is available as a daemon for Linux and Windows, which can manage the complete container lifecycle of its host system: image transfer and storage, container execution and supervision, low-level storage and network attachments, etc.
@ -13,7 +14,7 @@ containerd is designed to be embedded into a larger system, rather than being us
## Getting Started
See our documentation on [containerd.io](containerd.io):
See our documentation on [containerd.io](https://containerd.io):
* [for ops and admins](docs/ops.md)
* [namespaces](docs/namespaces.md)
* [client options](docs/client-opts.md)
@ -210,6 +211,5 @@ __If you are reporting a security issue, please reach out discreetly at security
The containerd codebase is released under the [Apache 2.0 license](LICENSE.code).
The README.md file, and files in the "docs" folder are licensed under the
Creative Commons Attribution 4.0 International License under the terms and
conditions set forth in the file "[LICENSE.docs](LICENSE.docs)". You may obtain a duplicate
copy of the same license, titled CC-BY-4.0, at http://creativecommons.org/licenses/by/4.0/.
Creative Commons Attribution 4.0 International License. You may obtain a
copy of the license, titled CC-BY-4.0, at http://creativecommons.org/licenses/by/4.0/.


@ -78,7 +78,7 @@ by `<major>.<minor>`. Releases branches will be in one of several states:
- __*Next*__: The next planned release branch.
- __*Active*__: The release is currently supported and accepting patches.
- __*End of Life*__: The release branch is no longer support and no new patches will be accepted.
- __*End of Life*__: The release branch is no longer supported and no new patches will be accepted.
Releases will be supported up to one year after a _minor_ release. This means that
we will accept bug reports and backports to release branches until the end of
@ -93,8 +93,9 @@ The current state is available in the following table:
| [0.0](https://github.com/containerd/containerd/releases/tag/0.0.5) | End of Life | Dec 4, 2015 | - |
| [0.1](https://github.com/containerd/containerd/releases/tag/v0.1.0) | End of Life | Mar 21, 2016 | - |
| [0.2](https://github.com/containerd/containerd/tree/v0.2.x) | End of Life | Apr 21, 2016 | December 5, 2017 |
| [1.0](https://github.com/containerd/contaienrd/releases/tag/v1.0.0) | Active | December 5, 2017 | max(December 5, 2018, release of 1.1.0) |
| [1.1](https://github.com/containerd/containerd/milestone/15) | Next | TBD | max(TBD+1 year, release of 1.2.0) |
| [1.0](https://github.com/containerd/containerd/releases/tag/v1.0.0) | Active | December 5, 2017 | December 5, 2018 |
| [1.1](https://github.com/containerd/containerd/releases/tag/v1.1.0) | Active | April 23, 2018 | max(April 23, 2019, release of 1.2.0, Kubernetes 1.10 EOL) |
| [1.2](https://github.com/containerd/containerd/milestone/17) | Next | TBD | max(TBD+1 year, release of 1.3.0) |
Note that branches and release from before 1.0 may not follow these rules.
@ -163,12 +164,13 @@ The following table provides an overview of the components covered by
containerd versions:
| Component | Status | Stablized Version | Links |
|---------------|----------|-------------------|---------------|
| GRPC API | Beta | 1.0 | [api/](api) |
| Metrics API | Beta | 1.0 | -
| Go client API | Unstable | 1.1 tentative | [godoc](https://godoc.org/github.com/containerd/containerd) |
| `ctr` tool | Unstable | Out of scope | - |
| Component | Status | Stabilized Version | Links |
|---------------|----------|--------------------|---------------|
| GRPC API | Stable | 1.0 | [api/](api) |
| Metrics API | Stable | 1.0 | - |
| Go client API | Unstable | 1.2 _tentative_ | [godoc](https://godoc.org/github.com/containerd/containerd) |
| CRI GRPC API | Unstable | v1alpha2 _current_ | [api/](https://github.com/kubernetes/kubernetes/tree/master/pkg/kubelet/apis/cri/runtime/v1alpha2) |
| `ctr` tool | Unstable | Out of scope | - |
From the version stated in the above table, that component must adhere to the
stability constraints expected in release versions.
@ -243,6 +245,19 @@ been carried out.
Any changes to the API should be detectable at compile time, so upgrading will
be a matter of fixing compilation errors and moving from there.
### CRI GRPC API
The CRI (Container Runtime Interface) GRPC API is used by a Kubernetes kubelet
to communicate with a container runtime. This interface is used to manage
container lifecycles and container images. Currently this API is under
development and unstable across Kubernetes releases. Each Kubernetes release
only supports a single version of CRI and the CRI plugin only implements a
single version of CRI.
Each _minor_ release will support one version of CRI and at least one version
of Kubernetes. Once this API is stable, a _minor_ will be compatible with any
version of Kubernetes which supports that version of CRI.
### `ctr` tool
The `ctr` tool provides the ability to introspect and understand the containerd


@ -1,3 +1,28 @@
# containerd Roadmap
# containerd roadmap
Please review the milestones on [github](https://github.com/containerd/containerd/milestones) for the updated roadmap and release information.
containerd uses the issues and milestones to define its roadmap.
`ROADMAP.md` files are common in open source projects but we find they quickly become out of date.
We opt for an issues and milestone approach that our maintainers and community can keep up-to-date as work is added and completed.
## Issues
Issues tagged with the `roadmap` label are high level roadmap items.
They are tasks and/or features that the containerd community wants completed.
Smaller issues and pull requests can reference back to the main roadmap issue that is tagged to help detail progress towards the overall goal.
## Milestones
Milestones define when an issue, pull request, and/or roadmap item is to be completed.
Issues are the what, milestones are the when.
Development is complex therefore roadmap items can move between milestones depending on the remaining development and testing required to release a change.
## Searching
To find the roadmap items currently planned for containerd you can filter on the `roadmap` label.
[Search Roadmap Items](https://github.com/containerd/containerd/issues?q=is%3Aopen+is%3Aissue+label%3Aroadmap)
After searching for roadmap items you can view what milestone they are scheduled to be completed in along with the progress.
[View Milestones](https://github.com/containerd/containerd/milestones)


@ -2,11 +2,11 @@ containerd is built with OCI support and with support for advanced features prov
We depend on a specific `runc` version when dealing with advanced features. You should have a specific runc build for development. The current supported runc commit is:
RUNC_COMMIT = 9f9c96235cc97674e935002fc3d78361b696a69e
RUNC_COMMIT = a618ab5a0186905949ee463dbb762c3d23e12a80
For more information on how to clone and build runc see the runc Building [documentation](https://github.com/opencontainers/runc#building).
Note: before building you may need to install additional support, which will vary by platform. For example, you may need to install `libseccomp` and `libapparmor` e.g. `libseccomp-dev` and `libapparmor-dev` for Ubuntu.
Note: before building you may need to install additional support, which will vary by platform. For example, you may need to install `libseccomp` e.g. `libseccomp-dev` for Ubuntu.
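
A rough sketch of pinning and building that commit follows. The authoritative steps are in the runc building documentation linked above; the `BUILDTAGS` values here reflect only the seccomp/apparmor features mentioned in this note and are an assumption:

```console
$ go get -d github.com/opencontainers/runc
$ cd $GOPATH/src/github.com/opencontainers/runc
$ git checkout a618ab5a0186905949ee463dbb762c3d23e12a80
$ make BUILDTAGS='seccomp apparmor' && sudo make install
```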
## building

4181 vendor/github.com/containerd/containerd/api/1.0.pb.txt generated vendored Executable file

File diff suppressed because it is too large

4181 vendor/github.com/containerd/containerd/api/1.1.pb.txt generated vendored Executable file

File diff suppressed because it is too large

18 vendor/github.com/containerd/containerd/api/README.md generated vendored Normal file

@ -0,0 +1,18 @@
This directory contains the GRPC API definitions for containerd.
All defined services and messages have been aggregated into `*.pb.txt`
descriptors files in this directory. Definitions present here are considered
frozen after the release.
At release time, the current `next.pb.txt` file will be moved into place to
freeze the API changes for the minor version. For example, when 1.0.0 is
released, `next.pb.txt` should be moved to `1.0.txt`. Notice that we leave off
the patch number, since the API will be completely locked down for a given
patch series.
We may find that by default, protobuf descriptors are too noisy to lock down
API changes. In that case, we may filter out certain fields in the descriptors,
possibly regenerating for old versions.
This process is similar to the [process used to ensure backwards compatibility
in Go](https://github.com/golang/go/tree/master/api).
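
In this tree the freeze is enforced by CI: per the `.travis.yml` hunk earlier in this commit, descriptor drift can be checked locally on Linux with the corresponding Makefile targets (shown here only as an illustration of the process described above):

```console
$ make check-protos check-api-descriptors
```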

File diff suppressed because it is too large


@ -0,0 +1,31 @@
syntax = "proto3";
package containerd.events;
import "google/protobuf/any.proto";
import weak "gogoproto/gogo.proto";
import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
option go_package = "github.com/containerd/containerd/api/events;events";
option (containerd.plugin.fieldpath_all) = true;
message ContainerCreate {
string id = 1;
string image = 2;
message Runtime {
string name = 1;
google.protobuf.Any options = 2;
}
Runtime runtime = 3;
}
message ContainerUpdate {
string id = 1;
string image = 2;
map<string, string> labels = 3;
string snapshot_key = 4;
}
message ContainerDelete {
string id = 1;
}
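
To show how these messages reach clients, here is a hedged Go sketch that subscribes to container events through the containerd client. It assumes the client's `Subscribe` API, the `github.com/containerd/typeurl` helper, and the generated `api/events` types; the regex filter string and namespace are illustrative, and the `fieldpath_all` option above is what enables field-level filters such as `event.id=="myid"`:

```go
package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	apievents "github.com/containerd/containerd/api/events"
	"github.com/containerd/containerd/namespaces"
	"github.com/containerd/typeurl"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx := namespaces.WithNamespace(context.Background(), "default")

	// Subscribe to container lifecycle topics published by the daemon.
	ch, errs := client.Subscribe(ctx, `topic~="/containers/.+"`)
	for {
		select {
		case env := <-ch:
			// env.Event is a typed Any; unpack it into the generated type.
			ev, err := typeurl.UnmarshalAny(env.Event)
			if err != nil {
				log.Println("unmarshal:", err)
				continue
			}
			if c, ok := ev.(*apievents.ContainerCreate); ok {
				log.Printf("container created: %s (image %s)", c.ID, c.Image)
			}
		case err := <-errs:
			log.Fatal(err)
		}
	}
}
```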


@ -0,0 +1,329 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/containerd/containerd/api/events/content.proto
package events
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
// skipping weak import containerd_plugin "github.com/containerd/containerd/protobuf/plugin"
import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
import strings "strings"
import reflect "reflect"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
type ContentDelete struct {
Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"`
}
func (m *ContentDelete) Reset() { *m = ContentDelete{} }
func (*ContentDelete) ProtoMessage() {}
func (*ContentDelete) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{0} }
func init() {
proto.RegisterType((*ContentDelete)(nil), "containerd.events.ContentDelete")
}
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *ContentDelete) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
case "digest":
return string(m.Digest), len(m.Digest) > 0
}
return "", false
}
func (m *ContentDelete) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ContentDelete) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Digest) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintContent(dAtA, i, uint64(len(m.Digest)))
i += copy(dAtA[i:], m.Digest)
}
return i, nil
}
func encodeVarintContent(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *ContentDelete) Size() (n int) {
var l int
_ = l
l = len(m.Digest)
if l > 0 {
n += 1 + l + sovContent(uint64(l))
}
return n
}
func sovContent(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozContent(x uint64) (n int) {
return sovContent(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *ContentDelete) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&ContentDelete{`,
`Digest:` + fmt.Sprintf("%v", this.Digest) + `,`,
`}`,
}, "")
return s
}
func valueToStringContent(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *ContentDelete) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowContent
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ContentDelete: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ContentDelete: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowContent
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthContent
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipContent(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthContent
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
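// Illustrative sketch, not part of the generated file: a wire roundtrip using
// the Marshal and Unmarshal methods above. Field 1 is emitted as tag 0x0a,
// then a varint length, then the digest bytes. The helper name is ours, added
// for illustration.
func exampleContentDeleteRoundtrip() {
	in := &ContentDelete{Digest: "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}
	wire, err := in.Marshal()
	if err != nil {
		panic(err)
	}
	out := &ContentDelete{}
	if err := out.Unmarshal(wire); err != nil {
		panic(err)
	}
	fmt.Println(out.Digest == in.Digest) // true
}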
func skipContent(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowContent
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowContent
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowContent
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthContent
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowContent
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipContent(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthContent = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowContent = fmt.Errorf("proto: integer overflow")
)
func init() {
proto.RegisterFile("github.com/containerd/containerd/api/events/content.proto", fileDescriptorContent)
}
var fileDescriptorContent = []byte{
// 228 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0x83, 0x45, 0x53,
0xf3, 0x4a, 0xf4, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, 0x85, 0x04, 0x11, 0x8a, 0xf4, 0x20, 0x0a, 0xa4,
0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0xb2, 0xfa, 0x20, 0x16, 0x44, 0xa1, 0x94, 0x03, 0x41, 0x3b,
0xc0, 0xea, 0x92, 0x4a, 0xd3, 0xf4, 0x0b, 0x72, 0x4a, 0xd3, 0x33, 0xf3, 0xf4, 0xd3, 0x32, 0x53,
0x73, 0x52, 0x0a, 0x12, 0x4b, 0x32, 0x20, 0x26, 0x28, 0x45, 0x73, 0xf1, 0x3a, 0x43, 0xec, 0x76,
0x49, 0xcd, 0x49, 0x2d, 0x49, 0x15, 0xf2, 0xe2, 0x62, 0x4b, 0xc9, 0x4c, 0x4f, 0x2d, 0x2e, 0x91,
0x60, 0x54, 0x60, 0xd4, 0xe0, 0x74, 0x32, 0x3a, 0x71, 0x4f, 0x9e, 0xe1, 0xd6, 0x3d, 0x79, 0x2d,
0x24, 0xab, 0xf2, 0x0b, 0x52, 0xf3, 0xe0, 0x76, 0x14, 0xeb, 0xa7, 0xe7, 0xeb, 0x42, 0xb4, 0xe8,
0xb9, 0x80, 0xa9, 0x20, 0xa8, 0x09, 0x4e, 0x01, 0x27, 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7,
0xd0, 0xf0, 0x48, 0x8e, 0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92,
0x63, 0x5c, 0xf0, 0x45, 0x8e, 0x31, 0xca, 0x88, 0x84, 0x00, 0xb2, 0x86, 0x50, 0x11, 0x0c, 0x11,
0x8c, 0x49, 0x6c, 0x60, 0x97, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x4b, 0x78, 0x99, 0xee,
0x61, 0x01, 0x00, 0x00,
}


@ -0,0 +1,13 @@
syntax = "proto3";
package containerd.events;
import weak "gogoproto/gogo.proto";
import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
option go_package = "github.com/containerd/containerd/api/events;events";
option (containerd.plugin.fieldpath_all) = true;
message ContentDelete {
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
}


@ -0,0 +1,19 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package events has protobuf types for various events that are used in
// containerd.
package events


@ -0,0 +1,949 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/containerd/containerd/api/events/image.proto
package events
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
// skipping weak import containerd_plugin "github.com/containerd/containerd/protobuf/plugin"
import strings "strings"
import reflect "reflect"
import sortkeys "github.com/gogo/protobuf/sortkeys"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
type ImageCreate struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (m *ImageCreate) Reset() { *m = ImageCreate{} }
func (*ImageCreate) ProtoMessage() {}
func (*ImageCreate) Descriptor() ([]byte, []int) { return fileDescriptorImage, []int{0} }
type ImageUpdate struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (m *ImageUpdate) Reset() { *m = ImageUpdate{} }
func (*ImageUpdate) ProtoMessage() {}
func (*ImageUpdate) Descriptor() ([]byte, []int) { return fileDescriptorImage, []int{1} }
type ImageDelete struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
}
func (m *ImageDelete) Reset() { *m = ImageDelete{} }
func (*ImageDelete) ProtoMessage() {}
func (*ImageDelete) Descriptor() ([]byte, []int) { return fileDescriptorImage, []int{2} }
func init() {
proto.RegisterType((*ImageCreate)(nil), "containerd.services.images.v1.ImageCreate")
proto.RegisterType((*ImageUpdate)(nil), "containerd.services.images.v1.ImageUpdate")
proto.RegisterType((*ImageDelete)(nil), "containerd.services.images.v1.ImageDelete")
}
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *ImageCreate) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
case "name":
return string(m.Name), len(m.Name) > 0
case "labels":
// Labels fields have been special-cased by name. If this breaks,
// add better special casing to fieldpath plugin.
if len(m.Labels) == 0 {
return "", false
}
value, ok := m.Labels[strings.Join(fieldpath[1:], ".")]
return value, ok
}
return "", false
}
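// Illustrative sketch, not part of the generated file: for the "labels" case
// above, the remainder of the fieldpath is rejoined with "." and used as the
// map key, so a label named "io.containerd.example" (hypothetical, for
// illustration only) is reachable as labels.io.containerd.example. The helper
// name is ours, added for illustration.
func exampleImageCreateLabelsField() {
	ev := &ImageCreate{
		Name:   "docker.io/library/alpine:latest",
		Labels: map[string]string{"io.containerd.example": "demo"},
	}
	if v, ok := ev.Field([]string{"labels", "io", "containerd", "example"}); ok {
		fmt.Println(v) // demo
	}
}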
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *ImageUpdate) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
case "name":
return string(m.Name), len(m.Name) > 0
case "labels":
// Labels fields have been special-cased by name. If this breaks,
// add better special casing to fieldpath plugin.
if len(m.Labels) == 0 {
return "", false
}
value, ok := m.Labels[strings.Join(fieldpath[1:], ".")]
return value, ok
}
return "", false
}
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *ImageDelete) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
case "name":
return string(m.Name), len(m.Name) > 0
}
return "", false
}
func (m *ImageCreate) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ImageCreate) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Name) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintImage(dAtA, i, uint64(len(m.Name)))
i += copy(dAtA[i:], m.Name)
}
if len(m.Labels) > 0 {
for k, _ := range m.Labels {
dAtA[i] = 0x12
i++
v := m.Labels[k]
mapSize := 1 + len(k) + sovImage(uint64(len(k))) + 1 + len(v) + sovImage(uint64(len(v)))
i = encodeVarintImage(dAtA, i, uint64(mapSize))
dAtA[i] = 0xa
i++
i = encodeVarintImage(dAtA, i, uint64(len(k)))
i += copy(dAtA[i:], k)
dAtA[i] = 0x12
i++
i = encodeVarintImage(dAtA, i, uint64(len(v)))
i += copy(dAtA[i:], v)
}
}
return i, nil
}
func (m *ImageUpdate) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ImageUpdate) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Name) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintImage(dAtA, i, uint64(len(m.Name)))
i += copy(dAtA[i:], m.Name)
}
if len(m.Labels) > 0 {
for k, _ := range m.Labels {
dAtA[i] = 0x12
i++
v := m.Labels[k]
mapSize := 1 + len(k) + sovImage(uint64(len(k))) + 1 + len(v) + sovImage(uint64(len(v)))
i = encodeVarintImage(dAtA, i, uint64(mapSize))
dAtA[i] = 0xa
i++
i = encodeVarintImage(dAtA, i, uint64(len(k)))
i += copy(dAtA[i:], k)
dAtA[i] = 0x12
i++
i = encodeVarintImage(dAtA, i, uint64(len(v)))
i += copy(dAtA[i:], v)
}
}
return i, nil
}
func (m *ImageDelete) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ImageDelete) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Name) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintImage(dAtA, i, uint64(len(m.Name)))
i += copy(dAtA[i:], m.Name)
}
return i, nil
}
func encodeVarintImage(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *ImageCreate) Size() (n int) {
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovImage(uint64(l))
}
if len(m.Labels) > 0 {
for k, v := range m.Labels {
_ = k
_ = v
mapEntrySize := 1 + len(k) + sovImage(uint64(len(k))) + 1 + len(v) + sovImage(uint64(len(v)))
n += mapEntrySize + 1 + sovImage(uint64(mapEntrySize))
}
}
return n
}
func (m *ImageUpdate) Size() (n int) {
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovImage(uint64(l))
}
if len(m.Labels) > 0 {
for k, v := range m.Labels {
_ = k
_ = v
mapEntrySize := 1 + len(k) + sovImage(uint64(len(k))) + 1 + len(v) + sovImage(uint64(len(v)))
n += mapEntrySize + 1 + sovImage(uint64(mapEntrySize))
}
}
return n
}
func (m *ImageDelete) Size() (n int) {
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovImage(uint64(l))
}
return n
}
func sovImage(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozImage(x uint64) (n int) {
return sovImage(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *ImageCreate) String() string {
if this == nil {
return "nil"
}
keysForLabels := make([]string, 0, len(this.Labels))
for k, _ := range this.Labels {
keysForLabels = append(keysForLabels, k)
}
sortkeys.Strings(keysForLabels)
mapStringForLabels := "map[string]string{"
for _, k := range keysForLabels {
mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
}
mapStringForLabels += "}"
s := strings.Join([]string{`&ImageCreate{`,
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
`Labels:` + mapStringForLabels + `,`,
`}`,
}, "")
return s
}
func (this *ImageUpdate) String() string {
if this == nil {
return "nil"
}
keysForLabels := make([]string, 0, len(this.Labels))
for k, _ := range this.Labels {
keysForLabels = append(keysForLabels, k)
}
sortkeys.Strings(keysForLabels)
mapStringForLabels := "map[string]string{"
for _, k := range keysForLabels {
mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
}
mapStringForLabels += "}"
s := strings.Join([]string{`&ImageUpdate{`,
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
`Labels:` + mapStringForLabels + `,`,
`}`,
}, "")
return s
}
func (this *ImageDelete) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&ImageDelete{`,
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
`}`,
}, "")
return s
}
func valueToStringImage(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *ImageCreate) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowImage
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ImageCreate: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ImageCreate: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowImage
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthImage
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowImage
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthImage
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Labels == nil {
m.Labels = make(map[string]string)
}
var mapkey string
var mapvalue string
for iNdEx < postIndex {
entryPreIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowImage
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
if fieldNum == 1 {
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowImage
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthImage
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
} else if fieldNum == 2 {
var stringLenmapvalue uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowImage
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapvalue := int(stringLenmapvalue)
if intStringLenmapvalue < 0 {
return ErrInvalidLengthImage
}
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
if postStringIndexmapvalue > l {
return io.ErrUnexpectedEOF
}
mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
iNdEx = postStringIndexmapvalue
} else {
iNdEx = entryPreIndex
skippy, err := skipImage(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthImage
}
if (iNdEx + skippy) > postIndex {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
m.Labels[mapkey] = mapvalue
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipImage(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthImage
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ImageUpdate) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowImage
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ImageUpdate: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ImageUpdate: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowImage
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthImage
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowImage
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthImage
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Labels == nil {
m.Labels = make(map[string]string)
}
var mapkey string
var mapvalue string
for iNdEx < postIndex {
entryPreIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowImage
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
if fieldNum == 1 {
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowImage
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthImage
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
} else if fieldNum == 2 {
var stringLenmapvalue uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowImage
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapvalue := int(stringLenmapvalue)
if intStringLenmapvalue < 0 {
return ErrInvalidLengthImage
}
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
if postStringIndexmapvalue > l {
return io.ErrUnexpectedEOF
}
mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
iNdEx = postStringIndexmapvalue
} else {
iNdEx = entryPreIndex
skippy, err := skipImage(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthImage
}
if (iNdEx + skippy) > postIndex {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
m.Labels[mapkey] = mapvalue
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipImage(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthImage
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ImageDelete) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowImage
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ImageDelete: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ImageDelete: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowImage
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthImage
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipImage(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthImage
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipImage(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowImage
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowImage
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowImage
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthImage
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowImage
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipImage(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthImage = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowImage = fmt.Errorf("proto: integer overflow")
)
func init() {
proto.RegisterFile("github.com/containerd/containerd/api/events/image.proto", fileDescriptorImage)
}
var fileDescriptorImage = []byte{
// 292 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x4f, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x67, 0xe6,
0x26, 0xa6, 0xa7, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xc9, 0x22, 0x94, 0xe8, 0x15, 0xa7,
0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb, 0x81, 0x15, 0x14, 0xeb, 0x95, 0x19, 0x4a, 0x39, 0x10,
0x34, 0x17, 0x6c, 0x4c, 0x52, 0x69, 0x9a, 0x7e, 0x41, 0x4e, 0x69, 0x7a, 0x66, 0x9e, 0x7e, 0x5a,
0x66, 0x6a, 0x4e, 0x4a, 0x41, 0x62, 0x49, 0x06, 0xc4, 0x02, 0xa5, 0x35, 0x8c, 0x5c, 0xdc, 0x9e,
0x20, 0xf3, 0x9c, 0x8b, 0x52, 0x13, 0x4b, 0x52, 0x85, 0x84, 0xb8, 0x58, 0xf2, 0x12, 0x73, 0x53,
0x25, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0xc0, 0x6c, 0x21, 0x3f, 0x2e, 0xb6, 0x9c, 0xc4, 0xa4,
0xd4, 0x9c, 0x62, 0x09, 0x26, 0x05, 0x66, 0x0d, 0x6e, 0x23, 0x33, 0x3d, 0xbc, 0xae, 0xd2, 0x43,
0x32, 0x4f, 0xcf, 0x07, 0xac, 0xd1, 0x35, 0xaf, 0xa4, 0xa8, 0x32, 0x08, 0x6a, 0x8a, 0x94, 0x25,
0x17, 0x37, 0x92, 0xb0, 0x90, 0x00, 0x17, 0x73, 0x76, 0x6a, 0x25, 0xd4, 0x46, 0x10, 0x53, 0x48,
0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, 0x34, 0x55, 0x82, 0x09, 0x2c, 0x06, 0xe1, 0x58, 0x31, 0x59,
0x30, 0x22, 0x9c, 0x1b, 0x5a, 0x90, 0x42, 0x55, 0xe7, 0x42, 0xcc, 0xa3, 0xb6, 0x73, 0x15, 0xa1,
0xae, 0x75, 0x49, 0xcd, 0x49, 0xc5, 0xee, 0x5a, 0xa7, 0x80, 0x13, 0x0f, 0xe5, 0x18, 0x6e, 0x3c,
0x94, 0x63, 0x68, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f,
0x1e, 0xc9, 0x31, 0x2e, 0xf8, 0x22, 0xc7, 0x18, 0x65, 0x44, 0x42, 0xc2, 0xb1, 0x86, 0x50, 0x11,
0x0c, 0x49, 0x6c, 0xe0, 0xb8, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x41, 0x80, 0x92, 0x17,
0x77, 0x02, 0x00, 0x00,
}


@ -0,0 +1,22 @@
syntax = "proto3";
package containerd.services.images.v1;
import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
option go_package = "github.com/containerd/containerd/api/events;events";
option (containerd.plugin.fieldpath_all) = true;
message ImageCreate {
string name = 1;
map<string, string> labels = 2;
}
message ImageUpdate {
string name = 1;
map<string, string> labels = 2;
}
message ImageDelete {
string name = 1;
}


@ -0,0 +1,950 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/containerd/containerd/api/events/namespace.proto
package events
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
// skipping weak import containerd_plugin "github.com/containerd/containerd/protobuf/plugin"
import strings "strings"
import reflect "reflect"
import sortkeys "github.com/gogo/protobuf/sortkeys"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
type NamespaceCreate struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (m *NamespaceCreate) Reset() { *m = NamespaceCreate{} }
func (*NamespaceCreate) ProtoMessage() {}
func (*NamespaceCreate) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{0} }
type NamespaceUpdate struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (m *NamespaceUpdate) Reset() { *m = NamespaceUpdate{} }
func (*NamespaceUpdate) ProtoMessage() {}
func (*NamespaceUpdate) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{1} }
type NamespaceDelete struct {
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
}
func (m *NamespaceDelete) Reset() { *m = NamespaceDelete{} }
func (*NamespaceDelete) ProtoMessage() {}
func (*NamespaceDelete) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{2} }
func init() {
proto.RegisterType((*NamespaceCreate)(nil), "containerd.events.NamespaceCreate")
proto.RegisterType((*NamespaceUpdate)(nil), "containerd.events.NamespaceUpdate")
proto.RegisterType((*NamespaceDelete)(nil), "containerd.events.NamespaceDelete")
}
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *NamespaceCreate) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
case "name":
return string(m.Name), len(m.Name) > 0
case "labels":
// Labels fields have been special-cased by name. If this breaks,
// add better special casing to fieldpath plugin.
if len(m.Labels) == 0 {
return "", false
}
value, ok := m.Labels[strings.Join(fieldpath[1:], ".")]
return value, ok
}
return "", false
}
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *NamespaceUpdate) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
case "name":
return string(m.Name), len(m.Name) > 0
case "labels":
// Labels fields have been special-cased by name. If this breaks,
// add better special casing to fieldpath plugin.
if len(m.Labels) == 0 {
return "", false
}
value, ok := m.Labels[strings.Join(fieldpath[1:], ".")]
return value, ok
}
return "", false
}
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *NamespaceDelete) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
case "name":
return string(m.Name), len(m.Name) > 0
}
return "", false
}
func (m *NamespaceCreate) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *NamespaceCreate) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Name) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name)))
i += copy(dAtA[i:], m.Name)
}
if len(m.Labels) > 0 {
for k, _ := range m.Labels {
dAtA[i] = 0x12
i++
v := m.Labels[k]
mapSize := 1 + len(k) + sovNamespace(uint64(len(k))) + 1 + len(v) + sovNamespace(uint64(len(v)))
i = encodeVarintNamespace(dAtA, i, uint64(mapSize))
dAtA[i] = 0xa
i++
i = encodeVarintNamespace(dAtA, i, uint64(len(k)))
i += copy(dAtA[i:], k)
dAtA[i] = 0x12
i++
i = encodeVarintNamespace(dAtA, i, uint64(len(v)))
i += copy(dAtA[i:], v)
}
}
return i, nil
}
func (m *NamespaceUpdate) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *NamespaceUpdate) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Name) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name)))
i += copy(dAtA[i:], m.Name)
}
if len(m.Labels) > 0 {
for k, _ := range m.Labels {
dAtA[i] = 0x12
i++
v := m.Labels[k]
mapSize := 1 + len(k) + sovNamespace(uint64(len(k))) + 1 + len(v) + sovNamespace(uint64(len(v)))
i = encodeVarintNamespace(dAtA, i, uint64(mapSize))
dAtA[i] = 0xa
i++
i = encodeVarintNamespace(dAtA, i, uint64(len(k)))
i += copy(dAtA[i:], k)
dAtA[i] = 0x12
i++
i = encodeVarintNamespace(dAtA, i, uint64(len(v)))
i += copy(dAtA[i:], v)
}
}
return i, nil
}
func (m *NamespaceDelete) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *NamespaceDelete) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Name) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name)))
i += copy(dAtA[i:], m.Name)
}
return i, nil
}
func encodeVarintNamespace(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *NamespaceCreate) Size() (n int) {
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovNamespace(uint64(l))
}
if len(m.Labels) > 0 {
for k, v := range m.Labels {
_ = k
_ = v
mapEntrySize := 1 + len(k) + sovNamespace(uint64(len(k))) + 1 + len(v) + sovNamespace(uint64(len(v)))
n += mapEntrySize + 1 + sovNamespace(uint64(mapEntrySize))
}
}
return n
}
func (m *NamespaceUpdate) Size() (n int) {
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovNamespace(uint64(l))
}
if len(m.Labels) > 0 {
for k, v := range m.Labels {
_ = k
_ = v
mapEntrySize := 1 + len(k) + sovNamespace(uint64(len(k))) + 1 + len(v) + sovNamespace(uint64(len(v)))
n += mapEntrySize + 1 + sovNamespace(uint64(mapEntrySize))
}
}
return n
}
func (m *NamespaceDelete) Size() (n int) {
var l int
_ = l
l = len(m.Name)
if l > 0 {
n += 1 + l + sovNamespace(uint64(l))
}
return n
}
func sovNamespace(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozNamespace(x uint64) (n int) {
return sovNamespace(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *NamespaceCreate) String() string {
if this == nil {
return "nil"
}
keysForLabels := make([]string, 0, len(this.Labels))
for k, _ := range this.Labels {
keysForLabels = append(keysForLabels, k)
}
sortkeys.Strings(keysForLabels)
mapStringForLabels := "map[string]string{"
for _, k := range keysForLabels {
mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
}
mapStringForLabels += "}"
s := strings.Join([]string{`&NamespaceCreate{`,
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
`Labels:` + mapStringForLabels + `,`,
`}`,
}, "")
return s
}
func (this *NamespaceUpdate) String() string {
if this == nil {
return "nil"
}
keysForLabels := make([]string, 0, len(this.Labels))
for k, _ := range this.Labels {
keysForLabels = append(keysForLabels, k)
}
sortkeys.Strings(keysForLabels)
mapStringForLabels := "map[string]string{"
for _, k := range keysForLabels {
mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
}
mapStringForLabels += "}"
s := strings.Join([]string{`&NamespaceUpdate{`,
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
`Labels:` + mapStringForLabels + `,`,
`}`,
}, "")
return s
}
func (this *NamespaceDelete) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&NamespaceDelete{`,
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
`}`,
}, "")
return s
}
func valueToStringNamespace(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *NamespaceCreate) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowNamespace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: NamespaceCreate: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: NamespaceCreate: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowNamespace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthNamespace
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowNamespace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthNamespace
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Labels == nil {
m.Labels = make(map[string]string)
}
var mapkey string
var mapvalue string
for iNdEx < postIndex {
entryPreIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowNamespace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
if fieldNum == 1 {
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowNamespace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthNamespace
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
} else if fieldNum == 2 {
var stringLenmapvalue uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowNamespace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapvalue := int(stringLenmapvalue)
if intStringLenmapvalue < 0 {
return ErrInvalidLengthNamespace
}
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
if postStringIndexmapvalue > l {
return io.ErrUnexpectedEOF
}
mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
iNdEx = postStringIndexmapvalue
} else {
iNdEx = entryPreIndex
skippy, err := skipNamespace(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthNamespace
}
if (iNdEx + skippy) > postIndex {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
m.Labels[mapkey] = mapvalue
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipNamespace(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthNamespace
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *NamespaceUpdate) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowNamespace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: NamespaceUpdate: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: NamespaceUpdate: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowNamespace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthNamespace
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowNamespace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthNamespace
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Labels == nil {
m.Labels = make(map[string]string)
}
var mapkey string
var mapvalue string
for iNdEx < postIndex {
entryPreIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowNamespace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
if fieldNum == 1 {
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowNamespace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthNamespace
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
} else if fieldNum == 2 {
var stringLenmapvalue uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowNamespace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapvalue := int(stringLenmapvalue)
if intStringLenmapvalue < 0 {
return ErrInvalidLengthNamespace
}
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
if postStringIndexmapvalue > l {
return io.ErrUnexpectedEOF
}
mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
iNdEx = postStringIndexmapvalue
} else {
iNdEx = entryPreIndex
skippy, err := skipNamespace(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthNamespace
}
if (iNdEx + skippy) > postIndex {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
m.Labels[mapkey] = mapvalue
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipNamespace(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthNamespace
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *NamespaceDelete) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowNamespace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: NamespaceDelete: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: NamespaceDelete: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowNamespace
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthNamespace
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipNamespace(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthNamespace
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipNamespace(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowNamespace
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowNamespace
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowNamespace
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthNamespace
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowNamespace
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipNamespace(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthNamespace = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowNamespace = fmt.Errorf("proto: integer overflow")
)
func init() {
proto.RegisterFile("github.com/containerd/containerd/api/events/namespace.proto", fileDescriptorNamespace)
}
var fileDescriptorNamespace = []byte{
// 296 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0xe7, 0x25,
0xe6, 0xa6, 0x16, 0x17, 0x24, 0x26, 0xa7, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x22,
0x94, 0xe9, 0x41, 0x94, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x65, 0xf5, 0x41, 0x2c, 0x88,
0x42, 0x29, 0x07, 0x82, 0xb6, 0x80, 0xd5, 0x25, 0x95, 0xa6, 0xe9, 0x17, 0xe4, 0x94, 0xa6, 0x67,
0xe6, 0xe9, 0xa7, 0x65, 0xa6, 0xe6, 0xa4, 0x14, 0x24, 0x96, 0x64, 0x40, 0x4c, 0x50, 0x5a, 0xc1,
0xc8, 0xc5, 0xef, 0x07, 0xb3, 0xde, 0xb9, 0x28, 0x35, 0xb1, 0x24, 0x55, 0x48, 0x88, 0x8b, 0x05,
0xe4, 0x22, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x30, 0x5b, 0xc8, 0x8d, 0x8b, 0x2d, 0x27,
0x31, 0x29, 0x35, 0xa7, 0x58, 0x82, 0x49, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x4f, 0x0f, 0xc3, 0x8d,
0x7a, 0x68, 0xe6, 0xe8, 0xf9, 0x80, 0x35, 0xb8, 0xe6, 0x95, 0x14, 0x55, 0x06, 0x41, 0x75, 0x4b,
0x59, 0x72, 0x71, 0x23, 0x09, 0x0b, 0x09, 0x70, 0x31, 0x67, 0xa7, 0x56, 0x42, 0x6d, 0x02, 0x31,
0x85, 0x44, 0xb8, 0x58, 0xcb, 0x12, 0x73, 0x4a, 0x53, 0x25, 0x98, 0xc0, 0x62, 0x10, 0x8e, 0x15,
0x93, 0x05, 0x23, 0xaa, 0x53, 0x43, 0x0b, 0x52, 0xa8, 0xe2, 0x54, 0x88, 0x39, 0xd4, 0x76, 0xaa,
0x2a, 0x92, 0x4b, 0x5d, 0x52, 0x73, 0x52, 0xb1, 0xbb, 0xd4, 0x29, 0xe0, 0xc4, 0x43, 0x39, 0x86,
0x1b, 0x0f, 0xe5, 0x18, 0x1a, 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c,
0xe3, 0x83, 0x47, 0x72, 0x8c, 0x0b, 0xbe, 0xc8, 0x31, 0x46, 0x19, 0x91, 0x90, 0x84, 0xac, 0x21,
0x54, 0x04, 0x43, 0x04, 0x63, 0x12, 0x1b, 0x38, 0x66, 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff,
0x00, 0x50, 0x87, 0x59, 0x83, 0x02, 0x00, 0x00,
}


@ -0,0 +1,23 @@
syntax = "proto3";
package containerd.events;
import weak "gogoproto/gogo.proto";
import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
option go_package = "github.com/containerd/containerd/api/events;events";
option (containerd.plugin.fieldpath_all) = true;
message NamespaceCreate {
string name = 1;
map<string, string> labels = 2;
}
message NamespaceUpdate {
string name = 1;
map<string, string> labels = 2;
}
message NamespaceDelete {
string name = 1;
}


@ -0,0 +1,704 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/containerd/containerd/api/events/snapshot.proto
package events
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
// skipping weak import containerd_plugin "github.com/containerd/containerd/protobuf/plugin"
import strings "strings"
import reflect "reflect"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
type SnapshotPrepare struct {
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
Parent string `protobuf:"bytes,2,opt,name=parent,proto3" json:"parent,omitempty"`
}
func (m *SnapshotPrepare) Reset() { *m = SnapshotPrepare{} }
func (*SnapshotPrepare) ProtoMessage() {}
func (*SnapshotPrepare) Descriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{0} }
type SnapshotCommit struct {
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
}
func (m *SnapshotCommit) Reset() { *m = SnapshotCommit{} }
func (*SnapshotCommit) ProtoMessage() {}
func (*SnapshotCommit) Descriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{1} }
type SnapshotRemove struct {
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
}
func (m *SnapshotRemove) Reset() { *m = SnapshotRemove{} }
func (*SnapshotRemove) ProtoMessage() {}
func (*SnapshotRemove) Descriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{2} }
func init() {
proto.RegisterType((*SnapshotPrepare)(nil), "containerd.events.SnapshotPrepare")
proto.RegisterType((*SnapshotCommit)(nil), "containerd.events.SnapshotCommit")
proto.RegisterType((*SnapshotRemove)(nil), "containerd.events.SnapshotRemove")
}
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *SnapshotPrepare) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
case "key":
return string(m.Key), len(m.Key) > 0
case "parent":
return string(m.Parent), len(m.Parent) > 0
}
return "", false
}
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *SnapshotCommit) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
case "key":
return string(m.Key), len(m.Key) > 0
case "name":
return string(m.Name), len(m.Name) > 0
}
return "", false
}
// Field returns the value for the given fieldpath as a string, if defined.
// If the value is not defined, the second value will be false.
func (m *SnapshotRemove) Field(fieldpath []string) (string, bool) {
if len(fieldpath) == 0 {
return "", false
}
switch fieldpath[0] {
case "key":
return string(m.Key), len(m.Key) > 0
}
return "", false
}
func (m *SnapshotPrepare) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *SnapshotPrepare) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Key) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Key)))
i += copy(dAtA[i:], m.Key)
}
if len(m.Parent) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Parent)))
i += copy(dAtA[i:], m.Parent)
}
return i, nil
}
func (m *SnapshotCommit) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *SnapshotCommit) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Key) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Key)))
i += copy(dAtA[i:], m.Key)
}
if len(m.Name) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Name)))
i += copy(dAtA[i:], m.Name)
}
return i, nil
}
func (m *SnapshotRemove) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *SnapshotRemove) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Key) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Key)))
i += copy(dAtA[i:], m.Key)
}
return i, nil
}
func encodeVarintSnapshot(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *SnapshotPrepare) Size() (n int) {
var l int
_ = l
l = len(m.Key)
if l > 0 {
n += 1 + l + sovSnapshot(uint64(l))
}
l = len(m.Parent)
if l > 0 {
n += 1 + l + sovSnapshot(uint64(l))
}
return n
}
func (m *SnapshotCommit) Size() (n int) {
var l int
_ = l
l = len(m.Key)
if l > 0 {
n += 1 + l + sovSnapshot(uint64(l))
}
l = len(m.Name)
if l > 0 {
n += 1 + l + sovSnapshot(uint64(l))
}
return n
}
func (m *SnapshotRemove) Size() (n int) {
var l int
_ = l
l = len(m.Key)
if l > 0 {
n += 1 + l + sovSnapshot(uint64(l))
}
return n
}
func sovSnapshot(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozSnapshot(x uint64) (n int) {
return sovSnapshot(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *SnapshotPrepare) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&SnapshotPrepare{`,
`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
`Parent:` + fmt.Sprintf("%v", this.Parent) + `,`,
`}`,
}, "")
return s
}
func (this *SnapshotCommit) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&SnapshotCommit{`,
`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
`}`,
}, "")
return s
}
func (this *SnapshotRemove) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&SnapshotRemove{`,
`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
`}`,
}, "")
return s
}
func valueToStringSnapshot(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *SnapshotPrepare) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSnapshot
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: SnapshotPrepare: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: SnapshotPrepare: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSnapshot
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthSnapshot
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Key = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSnapshot
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthSnapshot
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Parent = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipSnapshot(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthSnapshot
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *SnapshotCommit) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSnapshot
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: SnapshotCommit: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: SnapshotCommit: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSnapshot
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthSnapshot
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Key = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSnapshot
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthSnapshot
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Name = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipSnapshot(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthSnapshot
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *SnapshotRemove) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSnapshot
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: SnapshotRemove: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: SnapshotRemove: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSnapshot
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthSnapshot
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Key = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipSnapshot(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthSnapshot
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipSnapshot(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowSnapshot
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowSnapshot
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowSnapshot
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthSnapshot
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowSnapshot
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipSnapshot(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthSnapshot = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowSnapshot = fmt.Errorf("proto: integer overflow")
)
func init() {
proto.RegisterFile("github.com/containerd/containerd/api/events/snapshot.proto", fileDescriptorSnapshot)
}
var fileDescriptorSnapshot = []byte{
// 235 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4a, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x17, 0xe7,
0x25, 0x16, 0x14, 0x67, 0xe4, 0x97, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x22, 0x54,
0xe9, 0x41, 0x54, 0x48, 0x39, 0x10, 0x34, 0x0e, 0xac, 0x35, 0xa9, 0x34, 0x4d, 0xbf, 0x20, 0xa7,
0x34, 0x3d, 0x33, 0x4f, 0x3f, 0x2d, 0x33, 0x35, 0x27, 0xa5, 0x20, 0xb1, 0x24, 0x03, 0x62, 0xa8,
0x92, 0x35, 0x17, 0x7f, 0x30, 0xd4, 0x9a, 0x80, 0xa2, 0xd4, 0x82, 0xc4, 0xa2, 0x54, 0x21, 0x01,
0x2e, 0xe6, 0xec, 0xd4, 0x4a, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x10, 0x53, 0x48, 0x8c,
0x8b, 0x0d, 0x24, 0x93, 0x57, 0x22, 0xc1, 0x04, 0x16, 0x84, 0xf2, 0x94, 0xcc, 0xb8, 0xf8, 0x60,
0x9a, 0x9d, 0xf3, 0x73, 0x73, 0x33, 0x4b, 0xb0, 0xe8, 0x15, 0xe2, 0x62, 0xc9, 0x4b, 0xcc, 0x4d,
0x85, 0xea, 0x04, 0xb3, 0x95, 0x94, 0x10, 0xfa, 0x82, 0x52, 0x73, 0xf3, 0xcb, 0xb0, 0xd8, 0xe9,
0x14, 0x70, 0xe2, 0xa1, 0x1c, 0xc3, 0x8d, 0x87, 0x72, 0x0c, 0x0d, 0x8f, 0xe4, 0x18, 0x4f, 0x3c,
0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x05, 0x5f, 0xe4, 0x18, 0xa3,
0x8c, 0x48, 0x08, 0x47, 0x6b, 0x08, 0x15, 0xc1, 0x90, 0xc4, 0x06, 0xf6, 0xb3, 0x31, 0x20, 0x00,
0x00, 0xff, 0xff, 0x69, 0x66, 0xa9, 0x2a, 0x86, 0x01, 0x00, 0x00,
}

View file

@@ -0,0 +1,22 @@
syntax = "proto3";
package containerd.events;
import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
option go_package = "github.com/containerd/containerd/api/events;events";
option (containerd.plugin.fieldpath_all) = true;
message SnapshotPrepare {
string key = 1;
string parent = 2;
}
message SnapshotCommit {
string key = 1;
string name = 2;
}
message SnapshotRemove {
string key = 1;
}
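A minimal usage sketch for the generated bindings above, assuming only the vendored events package: Marshal emits, for each non-empty field, a one-byte tag (0x0a for key, 0x12 for parent), a varint length, and then the raw string bytes.

package main

import (
	"fmt"

	events "github.com/containerd/containerd/api/events"
)

func main() {
	ev := &events.SnapshotPrepare{Key: "snap-1", Parent: "base"}
	data, err := ev.Marshal()
	if err != nil {
		panic(err)
	}
	// Expected layout: 0a 06 <"snap-1"> 12 04 <"base">
	fmt.Printf("% x\n", data)
}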

File diff suppressed because it is too large

View file

@@ -0,0 +1,75 @@
syntax = "proto3";
package containerd.events;
import weak "gogoproto/gogo.proto";
import "google/protobuf/timestamp.proto";
import "github.com/containerd/containerd/api/types/mount.proto";
import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
option go_package = "github.com/containerd/containerd/api/events;events";
option (containerd.plugin.fieldpath_all) = true;
message TaskCreate {
string container_id = 1;
string bundle = 2;
repeated containerd.types.Mount rootfs = 3;
TaskIO io = 4 [(gogoproto.customname) = "IO"];
string checkpoint = 5;
uint32 pid = 6;
}
message TaskStart {
string container_id = 1;
uint32 pid = 2;
}
message TaskDelete {
string container_id = 1;
uint32 pid = 2;
uint32 exit_status = 3;
google.protobuf.Timestamp exited_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
}
message TaskIO {
string stdin = 1;
string stdout = 2;
string stderr = 3;
bool terminal = 4;
}
message TaskExit {
string container_id = 1;
string id = 2;
uint32 pid = 3;
uint32 exit_status = 4;
google.protobuf.Timestamp exited_at = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
}
message TaskOOM {
string container_id = 1;
}
message TaskExecAdded {
string container_id = 1;
string exec_id = 2;
}
message TaskExecStarted {
string container_id = 1;
string exec_id = 2;
uint32 pid = 3;
}
message TaskPaused {
string container_id = 1;
}
message TaskResumed {
string container_id = 1;
}
message TaskCheckpointed {
string container_id = 1;
string checkpoint = 2;
}
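For illustration, a hedged sketch of filling in one of these event messages in Go: the (gogoproto.stdtime)/(gogoproto.nullable) options map exited_at to a plain time.Time, and field spellings such as ContainerID follow containerd's generator conventions (assumed here).

package main

import (
	"fmt"
	"time"

	events "github.com/containerd/containerd/api/events"
)

func main() {
	exit := &events.TaskExit{
		ContainerID: "redis",
		ID:          "redis",
		Pid:         12345,
		ExitStatus:  0,
		ExitedAt:    time.Now(), // non-nullable stdtime field: a value, not a pointer
	}
	fmt.Println(exit.String())
}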

4181 vendor/github.com/containerd/containerd/api/next.pb.txt generated vendored Executable file

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@@ -0,0 +1,158 @@
syntax = "proto3";
package containerd.services.containers.v1;
import weak "gogoproto/gogo.proto";
import "google/protobuf/any.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/field_mask.proto";
import "google/protobuf/timestamp.proto";
option go_package = "github.com/containerd/containerd/api/services/containers/v1;containers";
// Containers provides metadata storage for containers used in the execution
// service.
//
// The objects here provide a state-independent view of containers for use in
// management and resource pinning. From that perspective, containers do not
// have a "state" but rather this is the set of resources that will be
// considered in use by the container.
//
// From the perspective of the execution service, these objects represent the
// base parameters for creating a container process.
//
// In general, when looking to add fields for this type, first ask yourself
// whether or not the function of the field has to do with runtime execution or
// is invariant of the runtime state of the container. If it has to do with
// runtime, or changes as the "container" is started and stops, it probably
// doesn't belong on this object.
service Containers {
rpc Get(GetContainerRequest) returns (GetContainerResponse);
rpc List(ListContainersRequest) returns (ListContainersResponse);
rpc Create(CreateContainerRequest) returns (CreateContainerResponse);
rpc Update(UpdateContainerRequest) returns (UpdateContainerResponse);
rpc Delete(DeleteContainerRequest) returns (google.protobuf.Empty);
}
message Container {
// ID is the user-specified identifier.
//
// This field may not be updated.
string id = 1;
// Labels provides an area to include arbitrary data on containers.
//
// The combined size of a key/value pair cannot exceed 4096 bytes.
//
// Note that to add a new value to this field, read the existing set and
// include the entire result in the update call.
map<string, string> labels = 2;
// Image contains the reference of the image used to build the
// specification and snapshots for running this container.
//
// If this field is updated, the spec and rootfs need to be updated as well.
string image = 3;
message Runtime {
// Name is the name of the runtime.
string name = 1;
// Options specify additional runtime initialization options.
google.protobuf.Any options = 2;
}
// Runtime specifies which runtime to use for executing this container.
Runtime runtime = 4;
// Spec to be used when creating the container. This is runtime specific.
google.protobuf.Any spec = 5;
// Snapshotter specifies the snapshotter name used for rootfs
string snapshotter = 6;
// SnapshotKey specifies the snapshot key to use for the container's root
// filesystem. When starting a task from this container, a caller should
// look up the mounts from the snapshot service and include those on the
// task create request.
//
// Snapshots referenced in this field will not be garbage collected.
//
// This field is set to empty when the rootfs is not a snapshot.
//
// This field may be updated.
string snapshot_key = 7;
// CreatedAt is the time the container was first created.
google.protobuf.Timestamp created_at = 8 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
// UpdatedAt is the last time the container was mutated.
google.protobuf.Timestamp updated_at = 9 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
// Extensions allow clients to provide zero or more blobs that are directly
// associated with the container. One may provide protobuf, json, or other
// encoding formats. The primary use of this is to further decorate the
// container object with fields that may be specific to a client integration.
//
// The key portion of this map should identify a "name" for the extension
// that should be unique against other extensions. When updating extension
// data, one should only update the specified extension using field paths
// to select a specific map key.
map<string, google.protobuf.Any> extensions = 10 [(gogoproto.nullable) = false];
}
message GetContainerRequest {
string id = 1;
}
message GetContainerResponse {
Container container = 1 [(gogoproto.nullable) = false];
}
message ListContainersRequest {
// Filters contains one or more filters using the syntax defined in the
// containerd filter package.
//
// The returned result will be those that match any of the provided
// filters. Expanded, containers that match the following will be
// returned:
//
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
//
// If filters is zero-length or nil, all items will be returned.
repeated string filters = 1;
}
message ListContainersResponse {
repeated Container containers = 1 [(gogoproto.nullable) = false];
}
message CreateContainerRequest {
Container container = 1 [(gogoproto.nullable) = false];
}
message CreateContainerResponse {
Container container = 1 [(gogoproto.nullable) = false];
}
// UpdateContainerRequest updates the metadata on one or more containers.
//
// The operation should follow semantics described in
// https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-mask,
// unless otherwise qualified.
message UpdateContainerRequest {
// Container provides the target values, as declared by the mask, for the update.
//
// The ID field must be set.
Container container = 1 [(gogoproto.nullable) = false];
// UpdateMask specifies which fields to perform the update on. If empty,
// the operation applies to all fields.
google.protobuf.FieldMask update_mask = 2;
}
message UpdateContainerResponse {
Container container = 1 [(gogoproto.nullable) = false];
}
message DeleteContainerRequest {
string id = 1;
}
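A hedged sketch of the update-mask semantics described above, using the generated gRPC client; the gogo FieldMask import and the "labels.<key>" path form are assumptions in line with containerd's usual conventions.

package main

import (
	"context"

	containersapi "github.com/containerd/containerd/api/services/containers/v1"
	ptypes "github.com/gogo/protobuf/types"
	"google.golang.org/grpc"
)

// setLabel updates a single label on a container without touching other fields,
// by selecting only that map key through the field mask.
func setLabel(ctx context.Context, conn *grpc.ClientConn, id, key, value string) error {
	client := containersapi.NewContainersClient(conn)
	_, err := client.Update(ctx, &containersapi.UpdateContainerRequest{
		Container: containersapi.Container{
			ID:     id,
			Labels: map[string]string{key: value},
		},
		UpdateMask: &ptypes.FieldMask{Paths: []string{"labels." + key}},
	})
	return err
}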

File diff suppressed because it is too large

View file

@@ -0,0 +1,318 @@
syntax = "proto3";
package containerd.services.content.v1;
import weak "gogoproto/gogo.proto";
import "google/protobuf/field_mask.proto";
import "google/protobuf/timestamp.proto";
import "google/protobuf/empty.proto";
option go_package = "github.com/containerd/containerd/api/services/content/v1;content";
// Content provides access to a content addressable storage system.
service Content {
// Info returns information about a committed object.
//
// This call can be used for getting the size of content and checking for
// existence.
rpc Info(InfoRequest) returns (InfoResponse);
// Update updates content metadata.
//
// This call can be used to manage the mutable content labels. The
// immutable metadata such as digest, size, and committed at cannot
// be updated.
rpc Update(UpdateRequest) returns (UpdateResponse);
// List streams the entire set of content as Info objects and closes the
// stream.
//
// Typically, this will yield a large response, chunked into messages.
// Clients should make provisions to ensure they can handle the entire data
// set.
rpc List(ListContentRequest) returns (stream ListContentResponse);
// Delete will delete the referenced object.
rpc Delete(DeleteContentRequest) returns (google.protobuf.Empty);
// Read allows one to read an object based on the offset into the content.
//
// The requested data may be returned in one or more messages.
rpc Read(ReadContentRequest) returns (stream ReadContentResponse);
// Status returns the status for a single reference.
rpc Status(StatusRequest) returns (StatusResponse);
// ListStatuses returns the status of ongoing object ingestions, started via
// Write.
//
// Only those matching the regular expression will be provided in the
// response. If the provided regular expression is empty, all ingestions
// will be provided.
rpc ListStatuses(ListStatusesRequest) returns (ListStatusesResponse);
// Write begins or resumes writes to a resource identified by a unique ref.
// Only one active stream may exist at a time for each ref.
//
// Once a write stream has started, it may only write to a single ref, thus
// once a stream is started, the ref may be omitted on subsequent writes.
//
// For any write transaction represented by a ref, only a single write may
// be made to a given offset. If overlapping writes occur, it is an error.
// Writes should be sequential; implementations that require sequential
// writes may return an error if this is violated.
//
// If expected_digest is set and already part of the content store, the
// write will fail.
//
// When completed, the commit flag should be set to true. If expected size
// or digest is set, the content will be validated against those values.
rpc Write(stream WriteContentRequest) returns (stream WriteContentResponse);
// Abort cancels the ongoing write named in the request. Any resources
// associated with the write will be collected.
rpc Abort(AbortRequest) returns (google.protobuf.Empty);
}
message Info {
// Digest is the hash identity of the blob.
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
// Size is the total number of bytes in the blob.
int64 size = 2;
// CreatedAt provides the time at which the blob was committed.
google.protobuf.Timestamp created_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
// UpdatedAt provides the time the info was last updated.
google.protobuf.Timestamp updated_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
// Labels are arbitrary data on snapshots.
//
// The combined size of a key/value pair cannot exceed 4096 bytes.
map<string, string> labels = 5;
}
message InfoRequest {
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
}
message InfoResponse {
Info info = 1 [(gogoproto.nullable) = false];
}
message UpdateRequest {
Info info = 1 [(gogoproto.nullable) = false];
// UpdateMask specifies which fields to perform the update on. If empty,
// the operation applies to all fields.
//
// In info, Digest, Size, and CreatedAt are immutable,
// other fields may be updated using this mask.
// If no mask is provided, all mutable fields are updated.
google.protobuf.FieldMask update_mask = 2;
}
message UpdateResponse {
Info info = 1 [(gogoproto.nullable) = false];
}
message ListContentRequest {
// Filters contains one or more filters using the syntax defined in the
// containerd filter package.
//
// The returned result will be those that match any of the provided
// filters. Expanded, containers that match the following will be
// returned:
//
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
//
// If filters is zero-length or nil, all items will be returned.
repeated string filters = 1;
}
message ListContentResponse {
repeated Info info = 1 [(gogoproto.nullable) = false];
}
message DeleteContentRequest {
// Digest specifies which content to delete.
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
}
// ReadContentRequest defines the fields that make up a request to read a portion of
// data from a stored object.
message ReadContentRequest {
// Digest is the hash identity to read.
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
// Offset specifies the number of bytes from the start at which to begin
// the read. If zero or less, the read will be from the start. This uses
// standard zero-indexed semantics.
int64 offset = 2;
// size is the total size of the read. If zero, the entire blob will be
// returned by the service.
int64 size = 3;
}
// ReadContentResponse carries byte data for a read request.
message ReadContentResponse {
int64 offset = 1; // offset of the returned data
bytes data = 2; // actual data
}
message Status {
google.protobuf.Timestamp started_at = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
google.protobuf.Timestamp updated_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
string ref = 3;
int64 offset = 4;
int64 total = 5;
string expected = 6 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
}
message StatusRequest {
string ref = 1;
}
message StatusResponse {
Status status = 1;
}
message ListStatusesRequest {
repeated string filters = 1;
}
message ListStatusesResponse {
repeated Status statuses = 1 [(gogoproto.nullable) = false];
}
// WriteAction defines the behavior of a WriteRequest.
enum WriteAction {
option (gogoproto.goproto_enum_prefix) = false;
option (gogoproto.enum_customname) = "WriteAction";
// WriteActionStat instructs the writer to return the current status while
// holding the lock on the write.
STAT = 0 [(gogoproto.enumvalue_customname) = "WriteActionStat"];
// WriteActionWrite sets the action for the write request to write data.
//
// Any data included will be written at the provided offset. The
// transaction will be left open for further writes.
//
// This is the default.
WRITE = 1 [(gogoproto.enumvalue_customname) = "WriteActionWrite"];
// WriteActionCommit will write any outstanding data in the message and
// commit the write, storing it under the digest.
//
// This can be used in a single message to send the data, verify it and
// commit it.
//
// This action will always terminate the write.
COMMIT = 2 [(gogoproto.enumvalue_customname) = "WriteActionCommit"];
}
// WriteContentRequest writes data to the request ref at offset.
message WriteContentRequest {
// Action sets the behavior of the write.
//
// When this is a write and the ref is not yet allocated, the ref will be
// allocated and the data will be written at offset.
//
// If the action is write and the ref is allocated, it will accept data to
// an offset that has not yet been written.
//
// If the action is write and there is no data, the current write status
// will be returned. This works differently from status because the stream
// holds a lock.
WriteAction action = 1;
// Ref identifies the pre-commit object to write to.
string ref = 2;
// Total can be set to have the service validate the total size of the
// committed content.
//
// The latest value before or with the commit action message will be used to
// validate the content. If the offset overflows total, the service may
// report an error. It is only required on one message for the write.
//
// If the value is zero or less, no validation of the final content will be
// performed.
int64 total = 3;
// Expected can be set to have the service validate the final content against
// the provided digest.
//
// If the digest is already present in the object store, an AlreadyExists
// error will be returned.
//
// Only the latest version will be used to check the content against the
// digest. It is only required to include it on a single message, before or
// with the commit action message.
string expected = 4 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
// Offset specifies the number of bytes from the start at which to begin
// the write. For most implementations, this means from the start of the
// file. This uses standard, zero-indexed semantics.
//
// If the action is write, the remote may remove all previously written
// data after the offset. Implementations may support arbitrary offsets but
// MUST support resetting this value to zero with a write. If an
// implementation does not support a write at a particular offset, an
// OutOfRange error must be returned.
int64 offset = 5;
// Data is the actual bytes to be written.
//
// If this is empty and the message is not a commit, a response will be
// returned with the current write state.
bytes data = 6;
// Labels are arbitrary data on snapshots.
//
// The combined size of a key/value pair cannot exceed 4096 bytes.
map<string, string> labels = 7;
}
// WriteContentResponse is returned on the culmination of a write call.
message WriteContentResponse {
// Action contains the action for the final message of the stream. A writer
// should confirm that it matches the intended result.
WriteAction action = 1;
// StartedAt provides the time at which the write began.
//
// This must be set for stat and commit write actions. All other write
// actions may omit this.
google.protobuf.Timestamp started_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
// UpdatedAt provides the last time of a successful write.
//
// This must be set for stat and commit write actions. All other write
// actions may omit this.
google.protobuf.Timestamp updated_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
// Offset is the current committed size for the write.
int64 offset = 4;
// Total provides the current, expected total size of the write.
//
// We include this to provide consistency with the Status structure on the
// client writer.
//
// This is only valid on the Stat and Commit response.
int64 total = 5;
// Digest, if present, includes the digest up to the currently committed
// bytes. If action is commit, this field will be set. It is implementation
// defined if this is set for other actions.
string digest = 6 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
}
message AbortRequest {
string ref = 1;
}
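A hedged sketch of the single-stream write flow described above: one message can carry the data, the expected digest and the commit action together. The streaming client comes from standard gRPC codegen; the go-digest import is an assumption.

package main

import (
	"context"

	contentapi "github.com/containerd/containerd/api/services/content/v1"
	digest "github.com/opencontainers/go-digest"
	"google.golang.org/grpc"
)

// writeBlob pushes a small blob under ref and commits it against its digest.
func writeBlob(ctx context.Context, conn *grpc.ClientConn, ref string, blob []byte) error {
	client := contentapi.NewContentClient(conn)
	wc, err := client.Write(ctx)
	if err != nil {
		return err
	}
	// A single message may carry data, the expected digest and the commit action.
	if err := wc.Send(&contentapi.WriteContentRequest{
		Action:   contentapi.WriteActionCommit,
		Ref:      ref,
		Offset:   0,
		Total:    int64(len(blob)),
		Expected: digest.FromBytes(blob),
		Data:     blob,
	}); err != nil {
		return err
	}
	if _, err := wc.Recv(); err != nil {
		return err
	}
	return wc.CloseSend()
}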

File diff suppressed because it is too large

View file

@@ -0,0 +1,62 @@
syntax = "proto3";
package containerd.services.diff.v1;
import weak "gogoproto/gogo.proto";
import "github.com/containerd/containerd/api/types/mount.proto";
import "github.com/containerd/containerd/api/types/descriptor.proto";
option go_package = "github.com/containerd/containerd/api/services/diff/v1;diff";
// Diff service creates and applies diffs
service Diff {
// Apply applies the content associated with the provided digests onto
// the provided mounts. Archive content will be extracted and
// decompressed if necessary.
rpc Apply(ApplyRequest) returns (ApplyResponse);
// Diff creates a diff between the given mounts and uploads the result
// to the content store.
rpc Diff(DiffRequest) returns (DiffResponse);
}
message ApplyRequest {
// Diff is the descriptor of the diff to be extracted
containerd.types.Descriptor diff = 1;
repeated containerd.types.Mount mounts = 2;
}
message ApplyResponse {
// Applied is the descriptor for the object which was applied.
// If the input was a compressed blob then the result will be
// the descriptor for the uncompressed blob.
containerd.types.Descriptor applied = 1;
}
message DiffRequest {
// Left are the mounts which represent the older copy
// in which is the base of the computed changes.
repeated containerd.types.Mount left = 1;
// Right are the mounts which represents the newer copy
// in which changes from the left were made into.
repeated containerd.types.Mount right = 2;
// MediaType is the media type descriptor for the created diff
// object
string media_type = 3;
// Ref identifies the pre-commit content store object. This
// reference can be used to get the status from the content store.
string ref = 4;
// Labels are the labels to apply to the generated content
// on content store commit.
map<string, string> labels = 5;
}
message DiffResponse {
// Diff is the descriptor of the diff which can be applied
containerd.types.Descriptor diff = 3;
}
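A hedged sketch of calling Apply with mounts and a diff descriptor obtained elsewhere; the types.Descriptor and types.Mount Go types come from the vendored api/types package referenced by the imports above.

package main

import (
	"context"

	diffapi "github.com/containerd/containerd/api/services/diff/v1"
	"github.com/containerd/containerd/api/types"
	"google.golang.org/grpc"
)

// applyDiff extracts the (possibly compressed) diff described by desc onto mounts.
func applyDiff(ctx context.Context, conn *grpc.ClientConn, desc *types.Descriptor, mounts []*types.Mount) (*types.Descriptor, error) {
	client := diffapi.NewDiffClient(conn)
	resp, err := client.Apply(ctx, &diffapi.ApplyRequest{
		Diff:   desc,
		Mounts: mounts,
	})
	if err != nil {
		return nil, err
	}
	// Applied describes the uncompressed content that was laid down.
	return resp.Applied, nil
}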

View file

@@ -0,0 +1,18 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package events defines the event pushing and subscription service.
package events

File diff suppressed because it is too large

View file

@ -0,0 +1,56 @@
syntax = "proto3";
package containerd.services.events.v1;
import weak "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
import weak "gogoproto/gogo.proto";
import "google/protobuf/any.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/timestamp.proto";
option go_package = "github.com/containerd/containerd/api/services/events/v1;events";
service Events {
// Publish an event to a topic.
//
// The event will be packed into a timestamp envelope with the namespace
// introspected from the context. The envelope will then be dispatched.
rpc Publish(PublishRequest) returns (google.protobuf.Empty);
// Forward sends an event that has already been packaged into an envelope
// with a timestamp and namespace.
//
// This is useful if earlier timestamping is required or when forwarding on
// behalf of another component, namespace or publisher.
rpc Forward(ForwardRequest) returns (google.protobuf.Empty);
// Subscribe to a stream of events, possibly returning only events that match any
// of the provided filters.
//
// Unlike many other methods in containerd, subscribers will get messages
// from all namespaces unless otherwise specified. If this is not desired,
// a filter can be provided in the format 'namespace==<namespace>' to
// restrict the received events.
rpc Subscribe(SubscribeRequest) returns (stream Envelope);
}
message PublishRequest {
string topic = 1;
google.protobuf.Any event = 2;
}
message ForwardRequest {
Envelope envelope = 1;
}
message SubscribeRequest {
repeated string filters = 1;
}
message Envelope {
option (containerd.plugin.fieldpath) = true;
google.protobuf.Timestamp timestamp = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
string namespace = 2;
string topic = 3;
google.protobuf.Any event = 4;
}
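A hedged sketch of Subscribe with the 'namespace==<namespace>' filter form mentioned above; stream handling follows standard gRPC codegen.

package main

import (
	"context"
	"fmt"

	eventsapi "github.com/containerd/containerd/api/services/events/v1"
	"google.golang.org/grpc"
)

// watch prints every event published in the "default" namespace.
func watch(ctx context.Context, conn *grpc.ClientConn) error {
	client := eventsapi.NewEventsClient(conn)
	stream, err := client.Subscribe(ctx, &eventsapi.SubscribeRequest{
		Filters: []string{`namespace==default`},
	})
	if err != nil {
		return err
	}
	for {
		env, err := stream.Recv()
		if err != nil {
			return err
		}
		fmt.Println(env.Timestamp, env.Namespace, env.Topic)
	}
}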

View file

@@ -0,0 +1,17 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package images

File diff suppressed because it is too large

View file

@ -0,0 +1,124 @@
syntax = "proto3";
package containerd.services.images.v1;
import weak "gogoproto/gogo.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/field_mask.proto";
import "google/protobuf/timestamp.proto";
import "github.com/containerd/containerd/api/types/descriptor.proto";
option go_package = "github.com/containerd/containerd/api/services/images/v1;images";
// Images is a service that allows one to register images with containerd.
//
// In containerd, an image is merely the mapping of a name to a content root,
// described by a descriptor. The behavior and state of the image is purely
// dictated by the type of the descriptor.
//
// From the perspective of this service, these references are mostly shallow,
// in that the existence of the required content won't be validated until
// required by consuming services.
//
// As such, this can really be considered a "metadata service".
service Images {
// Get returns an image by name.
rpc Get(GetImageRequest) returns (GetImageResponse);
// List returns a list of all images known to containerd.
rpc List(ListImagesRequest) returns (ListImagesResponse);
// Create an image record in the metadata store.
//
// The name of the image must be unique.
rpc Create(CreateImageRequest) returns (CreateImageResponse);
// Update assigns the name to a given target image based on the provided
// image.
rpc Update(UpdateImageRequest) returns (UpdateImageResponse);
// Delete deletes the image by name.
rpc Delete(DeleteImageRequest) returns (google.protobuf.Empty);
}
message Image {
// Name provides a unique name for the image.
//
// Containerd treats this as the primary identifier.
string name = 1;
// Labels provides free form labels for the image. These are runtime only
// and do not get inherited into the package image in any way.
//
// Labels may be updated using the field mask.
// The combined size of a key/value pair cannot exceed 4096 bytes.
map<string, string> labels = 2;
// Target describes the content entry point of the image.
containerd.types.Descriptor target = 3 [(gogoproto.nullable) = false];
// CreatedAt is the time the image was first created.
google.protobuf.Timestamp created_at = 7 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
// UpdatedAt is the last time the image was mutated.
google.protobuf.Timestamp updated_at = 8 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
}
message GetImageRequest {
string name = 1;
}
message GetImageResponse {
Image image = 1;
}
message CreateImageRequest {
Image image = 1 [(gogoproto.nullable) = false];
}
message CreateImageResponse {
Image image = 1 [(gogoproto.nullable) = false];
}
message UpdateImageRequest {
// Image provides a full or partial image for update.
//
// The name field must be set or an error will be returned.
Image image = 1 [(gogoproto.nullable) = false];
// UpdateMask specifies which fields to perform the update on. If empty,
// the operation applies to all fields.
google.protobuf.FieldMask update_mask = 2;
}
message UpdateImageResponse {
Image image = 1 [(gogoproto.nullable) = false];
}
message ListImagesRequest {
// Filters contains one or more filters using the syntax defined in the
// containerd filter package.
//
// The returned result will be those that match any of the provided
// filters. Expanded, images that match the following will be
// returned:
//
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
//
// If filters is zero-length or nil, all items will be returned.
repeated string filters = 1;
}
message ListImagesResponse {
repeated Image images = 1 [(gogoproto.nullable) = false];
}
message DeleteImageRequest {
string name = 1;
// Sync indicates that the delete and cleanup should be done
// synchronously before returning to the caller
//
// Default is false
bool sync = 2;
}
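A hedged sketch of the "metadata service" view described above: resolve an image name and read the descriptor at its content entry point.

package main

import (
	"context"
	"fmt"

	imagesapi "github.com/containerd/containerd/api/services/images/v1"
	"google.golang.org/grpc"
)

// printTarget resolves an image name to the descriptor at its content entry point.
func printTarget(ctx context.Context, conn *grpc.ClientConn, name string) error {
	client := imagesapi.NewImagesClient(conn)
	resp, err := client.Get(ctx, &imagesapi.GetImageRequest{Name: name})
	if err != nil {
		return err
	}
	img := resp.Image // set on success; a missing image surfaces as an error instead
	fmt.Println(img.Name, img.Target.MediaType, img.Target.Digest)
	return nil
}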

View file

@@ -0,0 +1,17 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package introspection

File diff suppressed because it is too large

View file

@ -0,0 +1,81 @@
syntax = "proto3";
package containerd.services.introspection.v1;
import "github.com/containerd/containerd/api/types/platform.proto";
import "google/rpc/status.proto";
import weak "gogoproto/gogo.proto";
option go_package = "github.com/containerd/containerd/api/services/introspection/v1;introspection";
service Introspection {
// Plugins returns a list of plugins in containerd.
//
// Clients can use this to detect features and capabilities when using
// containerd.
rpc Plugins(PluginsRequest) returns (PluginsResponse);
}
message Plugin {
// Type defines the type of plugin.
//
// See package plugin for a list of possible values. Non core plugins may
// define their own values during registration.
string type = 1;
// ID identifies the plugin uniquely in the system.
string id = 2;
// Requires lists the plugin types required by this plugin.
repeated string requires = 3;
// Platforms enumerates the platforms this plugin will support.
//
// If values are provided here, the plugin will only be operable under the
// provided platforms.
//
// If this is empty, the plugin will work across all platforms.
//
// If the plugin prefers certain platforms over others, they should be
// listed from most to least preferred.
repeated types.Platform platforms = 4 [(gogoproto.nullable) = false];
// Exports allows plugins to provide values about state or configuration to
// interested parties.
//
// One example is exposing the configured path of a snapshotter plugin.
map<string, string> exports = 5;
// Capabilities allows plugins to communicate feature switches to allow
// clients to detect features that may not be on by default or may be
// different from version to version.
//
// Use this sparingly.
repeated string capabilities = 6;
// InitErr will be set if the plugin fails initialization.
//
// This means the plugin may have been registered but a non-terminal error
// was encountered during initialization.
//
// Plugins that have this value set cannot be used.
google.rpc.Status init_err = 7;
}
message PluginsRequest {
// Filters contains one or more filters using the syntax defined in the
// containerd filter package.
//
// The returned result will be those that match any of the provided
// filters. Expanded, plugins that match the following will be
// returned:
//
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
//
// If filters is zero-length or nil, all items will be returned.
repeated string filters = 1;
}
message PluginsResponse {
repeated Plugin plugins = 1 [(gogoproto.nullable) = false];
}
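A hedged sketch of the feature-detection flow the comments describe: list the plugins and skip any whose InitErr is set.

package main

import (
	"context"
	"fmt"

	introspectionapi "github.com/containerd/containerd/api/services/introspection/v1"
	"google.golang.org/grpc"
)

// usablePlugins returns the type/id of every plugin that initialized cleanly.
func usablePlugins(ctx context.Context, conn *grpc.ClientConn) ([]string, error) {
	client := introspectionapi.NewIntrospectionClient(conn)
	resp, err := client.Plugins(ctx, &introspectionapi.PluginsRequest{})
	if err != nil {
		return nil, err
	}
	var out []string
	for _, p := range resp.Plugins {
		if p.InitErr != nil { // registered, but failed initialization: cannot be used
			continue
		}
		out = append(out, fmt.Sprintf("%s.%s", p.Type, p.ID))
	}
	return out, nil
}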

View file

@ -0,0 +1,17 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package leases

File diff suppressed because it is too large

View file

@ -0,0 +1,58 @@
syntax = "proto3";
package containerd.services.leases.v1;
import weak "gogoproto/gogo.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/timestamp.proto";
option go_package = "github.com/containerd/containerd/api/services/leases/v1;leases";
// Leases service manages resources leases within the metadata store.
service Leases {
// Create creates a new lease for managing changes to metadata. A lease
// can be used to protect objects from being removed.
rpc Create(CreateRequest) returns (CreateResponse);
// Delete deletes the lease and makes any unreferenced objects created
// during the lease eligible for garbage collection if not referenced
// or retained by other resources during the lease.
rpc Delete(DeleteRequest) returns (google.protobuf.Empty);
// List lists all active leases, returning the full list of
// leases and optionally including the referenced resources.
rpc List(ListRequest) returns (ListResponse);
}
// Lease is an object which retains resources while it exists.
message Lease {
string id = 1;
google.protobuf.Timestamp created_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
map<string, string> labels = 3;
}
message CreateRequest {
// ID is used to identify the lease; when the id is not set, the service
// generates a random identifier for the lease.
string id = 1;
map<string, string> labels = 3;
}
message CreateResponse {
Lease lease = 1;
}
message DeleteRequest {
string id = 1;
}
message ListRequest {
repeated string filters = 1;
}
message ListResponse {
repeated Lease leases = 1;
}
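A hedged sketch of bracketing work with a lease as described above; note that in real use the lease is also attached to subsequent requests (for example via gRPC metadata) so the daemon associates new resources with it.

package main

import (
	"context"

	leasesapi "github.com/containerd/containerd/api/services/leases/v1"
	"google.golang.org/grpc"
)

// withLease creates a lease, runs fn, then deletes the lease so unreferenced
// objects created during fn become eligible for garbage collection.
func withLease(ctx context.Context, conn *grpc.ClientConn, fn func(context.Context) error) error {
	client := leasesapi.NewLeasesClient(conn)
	// Leaving ID empty lets the service generate a random identifier.
	resp, err := client.Create(ctx, &leasesapi.CreateRequest{})
	if err != nil {
		return err
	}
	defer client.Delete(ctx, &leasesapi.DeleteRequest{ID: resp.Lease.ID})
	return fn(ctx)
}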

File diff suppressed because it is too large

View file

@ -0,0 +1,92 @@
syntax = "proto3";
package containerd.services.namespaces.v1;
import weak "gogoproto/gogo.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/field_mask.proto";
option go_package = "github.com/containerd/containerd/api/services/namespaces/v1;namespaces";
// Namespaces provides the ability to manipulate containerd namespaces.
//
// All objects in the system are required to be a member of a namespace. If a
// namespace is deleted, all objects, including containers, images and
// snapshots, will be deleted, as well.
//
// Unless otherwise noted, operations in containerd apply only to the namespace
// supplied per request.
//
// I hope this goes without saying, but namespaces are themselves NOT
// namespaced.
service Namespaces {
rpc Get(GetNamespaceRequest) returns (GetNamespaceResponse);
rpc List(ListNamespacesRequest) returns (ListNamespacesResponse);
rpc Create(CreateNamespaceRequest) returns (CreateNamespaceResponse);
rpc Update(UpdateNamespaceRequest) returns (UpdateNamespaceResponse);
rpc Delete(DeleteNamespaceRequest) returns (google.protobuf.Empty);
}
message Namespace {
string name = 1;
// Labels provides an area to include arbitrary data on namespaces.
//
// The combined size of a key/value pair cannot exceed 4096 bytes.
//
// Note that to add a new value to this field, read the existing set and
// include the entire result in the update call.
map<string, string> labels = 2;
}
message GetNamespaceRequest {
string name = 1;
}
message GetNamespaceResponse {
Namespace namespace = 1 [(gogoproto.nullable) = false];
}
message ListNamespacesRequest {
string filter = 1;
}
message ListNamespacesResponse {
repeated Namespace namespaces = 1 [(gogoproto.nullable) = false];
}
message CreateNamespaceRequest {
Namespace namespace = 1 [(gogoproto.nullable) = false];
}
message CreateNamespaceResponse {
Namespace namespace = 1 [(gogoproto.nullable) = false];
}
// UpdateNamespaceRequest updates the metadata for a namespace.
//
// The operation should follow semantics described in
// https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-mask,
// unless otherwise qualified.
message UpdateNamespaceRequest {
// Namespace provides the target value, as declared by the mask, for the update.
//
// The namespace field must be set.
Namespace namespace = 1 [(gogoproto.nullable) = false];
// UpdateMask specifies which fields to perform the update on. If empty,
// the operation applies to all fields.
//
// For the most part, this applies only to selectively updating labels on
// the namespace. While field masks are typically limited to ascii alphas
// and digits, we just take everything after the "labels." as the map key.
google.protobuf.FieldMask update_mask = 2;
}
message UpdateNamespaceResponse {
Namespace namespace = 1 [(gogoproto.nullable) = false];
}
message DeleteNamespaceRequest {
string name = 1;
}
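A hedged sketch of the "labels.<key>" mask behaviour spelled out above; the gogo FieldMask import is an assumption, as elsewhere in these bindings.

package main

import (
	"context"

	namespacesapi "github.com/containerd/containerd/api/services/namespaces/v1"
	ptypes "github.com/gogo/protobuf/types"
	"google.golang.org/grpc"
)

// setNamespaceLabel updates exactly one label on a namespace, leaving the rest untouched.
func setNamespaceLabel(ctx context.Context, conn *grpc.ClientConn, ns, key, value string) error {
	client := namespacesapi.NewNamespacesClient(conn)
	_, err := client.Update(ctx, &namespacesapi.UpdateNamespaceRequest{
		Namespace: namespacesapi.Namespace{
			Name:   ns,
			Labels: map[string]string{key: value},
		},
		// Everything after "labels." is treated as the map key.
		UpdateMask: &ptypes.FieldMask{Paths: []string{"labels." + key}},
	})
	return err
}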

File diff suppressed because it is too large

View file

@ -0,0 +1,150 @@
syntax = "proto3";
package containerd.services.snapshots.v1;
import weak "gogoproto/gogo.proto";
import "google/protobuf/empty.proto";
import "google/protobuf/field_mask.proto";
import "google/protobuf/timestamp.proto";
import "github.com/containerd/containerd/api/types/mount.proto";
option go_package = "github.com/containerd/containerd/api/services/snapshots/v1;snapshots";
// Snapshot service manages snapshots
service Snapshots {
rpc Prepare(PrepareSnapshotRequest) returns (PrepareSnapshotResponse);
rpc View(ViewSnapshotRequest) returns (ViewSnapshotResponse);
rpc Mounts(MountsRequest) returns (MountsResponse);
rpc Commit(CommitSnapshotRequest) returns (google.protobuf.Empty);
rpc Remove(RemoveSnapshotRequest) returns (google.protobuf.Empty);
rpc Stat(StatSnapshotRequest) returns (StatSnapshotResponse);
rpc Update(UpdateSnapshotRequest) returns (UpdateSnapshotResponse);
rpc List(ListSnapshotsRequest) returns (stream ListSnapshotsResponse);
rpc Usage(UsageRequest) returns (UsageResponse);
}
message PrepareSnapshotRequest {
string snapshotter = 1;
string key = 2;
string parent = 3;
// Labels are arbitrary data on snapshots.
//
// The combined size of a key/value pair cannot exceed 4096 bytes.
map<string, string> labels = 4;
}
message PrepareSnapshotResponse {
repeated containerd.types.Mount mounts = 1;
}
message ViewSnapshotRequest {
string snapshotter = 1;
string key = 2;
string parent = 3;
// Labels are arbitrary data on snapshots.
//
// The combined size of a key/value pair cannot exceed 4096 bytes.
map<string, string> labels = 4;
}
message ViewSnapshotResponse {
repeated containerd.types.Mount mounts = 1;
}
message MountsRequest {
string snapshotter = 1;
string key = 2;
}
message MountsResponse {
repeated containerd.types.Mount mounts = 1;
}
message RemoveSnapshotRequest {
string snapshotter = 1;
string key = 2;
}
message CommitSnapshotRequest {
string snapshotter = 1;
string name = 2;
string key = 3;
// Labels are arbitrary data on snapshots.
//
// The combined size of a key/value pair cannot exceed 4096 bytes.
map<string, string> labels = 4;
}
message StatSnapshotRequest {
string snapshotter = 1;
string key = 2;
}
enum Kind {
option (gogoproto.goproto_enum_prefix) = false;
option (gogoproto.enum_customname) = "Kind";
UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "KindUnknown"];
VIEW = 1 [(gogoproto.enumvalue_customname) = "KindView"];
ACTIVE = 2 [(gogoproto.enumvalue_customname) = "KindActive"];
COMMITTED = 3 [(gogoproto.enumvalue_customname) = "KindCommitted"];
}
message Info {
string name = 1;
string parent = 2;
Kind kind = 3;
// CreatedAt provides the time at which the snapshot was created.
google.protobuf.Timestamp created_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
// UpdatedAt provides the time the info was last updated.
google.protobuf.Timestamp updated_at = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
// Labels are arbitrary data on snapshots.
//
// The combined size of a key/value pair cannot exceed 4096 bytes.
map<string, string> labels = 6;
}
message StatSnapshotResponse {
Info info = 1 [(gogoproto.nullable) = false];
}
message UpdateSnapshotRequest {
string snapshotter = 1;
Info info = 2 [(gogoproto.nullable) = false];
// UpdateMask specifies which fields to perform the update on. If empty,
// the operation applies to all fields.
//
// In info, Name, Parent, Kind, Created are immutable,
// other fields may be updated using this mask.
// If no mask is provided, all mutable fields are updated.
google.protobuf.FieldMask update_mask = 3;
}
message UpdateSnapshotResponse {
Info info = 1 [(gogoproto.nullable) = false];
}
message ListSnapshotsRequest{
string snapshotter = 1;
}
message ListSnapshotsResponse {
repeated Info info = 1 [(gogoproto.nullable) = false];
}
message UsageRequest {
string snapshotter = 1;
string key = 2;
}
message UsageResponse {
int64 size = 1;
int64 inodes = 2;
}
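A hedged sketch of the usual prepare/commit cycle over this service; mount handling is left to the caller and field names follow the messages above.

package main

import (
	"context"

	snapshotsapi "github.com/containerd/containerd/api/services/snapshots/v1"
	"github.com/containerd/containerd/api/types"
	"google.golang.org/grpc"
)

// prepareAndCommit creates an active snapshot on top of parent, hands its mounts
// to populate, and then commits the result under name.
func prepareAndCommit(ctx context.Context, conn *grpc.ClientConn, snapshotter, key, parent, name string,
	populate func([]*types.Mount) error) error {

	client := snapshotsapi.NewSnapshotsClient(conn)
	resp, err := client.Prepare(ctx, &snapshotsapi.PrepareSnapshotRequest{
		Snapshotter: snapshotter,
		Key:         key,
		Parent:      parent,
	})
	if err != nil {
		return err
	}
	if err := populate(resp.Mounts); err != nil {
		return err
	}
	_, err = client.Commit(ctx, &snapshotsapi.CommitSnapshotRequest{
		Snapshotter: snapshotter,
		Name:        name,
		Key:         key,
	})
	return err
}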

File diff suppressed because it is too large

View file

@ -0,0 +1,209 @@
syntax = "proto3";
package containerd.services.tasks.v1;
import "google/protobuf/empty.proto";
import "google/protobuf/any.proto";
import weak "gogoproto/gogo.proto";
import "github.com/containerd/containerd/api/types/mount.proto";
import "github.com/containerd/containerd/api/types/metrics.proto";
import "github.com/containerd/containerd/api/types/descriptor.proto";
import "github.com/containerd/containerd/api/types/task/task.proto";
import "google/protobuf/timestamp.proto";
option go_package = "github.com/containerd/containerd/api/services/tasks/v1;tasks";
service Tasks {
// Create a task.
rpc Create(CreateTaskRequest) returns (CreateTaskResponse);
// Start a process.
rpc Start(StartRequest) returns (StartResponse);
// Delete a task and on disk state.
rpc Delete(DeleteTaskRequest) returns (DeleteResponse);
rpc DeleteProcess(DeleteProcessRequest) returns (DeleteResponse);
rpc Get(GetRequest) returns (GetResponse);
rpc List(ListTasksRequest) returns (ListTasksResponse);
// Kill a task or process.
rpc Kill(KillRequest) returns (google.protobuf.Empty);
rpc Exec(ExecProcessRequest) returns (google.protobuf.Empty);
rpc ResizePty(ResizePtyRequest) returns (google.protobuf.Empty);
rpc CloseIO(CloseIORequest) returns (google.protobuf.Empty);
rpc Pause(PauseTaskRequest) returns (google.protobuf.Empty);
rpc Resume(ResumeTaskRequest) returns (google.protobuf.Empty);
rpc ListPids(ListPidsRequest) returns (ListPidsResponse);
rpc Checkpoint(CheckpointTaskRequest) returns (CheckpointTaskResponse);
rpc Update(UpdateTaskRequest) returns (google.protobuf.Empty);
rpc Metrics(MetricsRequest) returns (MetricsResponse);
rpc Wait(WaitRequest) returns (WaitResponse);
}
message CreateTaskRequest {
string container_id = 1;
// RootFS provides the pre-chroot mounts to perform in the shim before
// executing the container task.
//
// These are for mounts that cannot be performed in the user namespace.
// Typically, these mounts should be resolved from snapshots specified on
// the container object.
repeated containerd.types.Mount rootfs = 3;
string stdin = 4;
string stdout = 5;
string stderr = 6;
bool terminal = 7;
containerd.types.Descriptor checkpoint = 8;
google.protobuf.Any options = 9;
}
message CreateTaskResponse {
string container_id = 1;
uint32 pid = 2;
}
message StartRequest {
string container_id = 1;
string exec_id = 2;
}
message StartResponse {
uint32 pid = 1;
}
message DeleteTaskRequest {
string container_id = 1;
}
message DeleteResponse {
string id = 1;
uint32 pid = 2;
uint32 exit_status = 3;
google.protobuf.Timestamp exited_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
}
message DeleteProcessRequest {
string container_id = 1;
string exec_id = 2;
}
message GetRequest {
string container_id = 1;
string exec_id = 2;
}
message GetResponse {
containerd.v1.types.Process process = 1;
}
message ListTasksRequest {
string filter = 1;
}
message ListTasksResponse {
repeated containerd.v1.types.Process tasks = 1;
}
message KillRequest {
string container_id = 1;
string exec_id = 2;
uint32 signal = 3;
bool all = 4;
}
message ExecProcessRequest {
string container_id = 1;
string stdin = 2;
string stdout = 3;
string stderr = 4;
bool terminal = 5;
// Spec for starting a process in the target container.
//
// For runc, this is a process spec, for example.
google.protobuf.Any spec = 6;
// id of the exec process
string exec_id = 7;
}
message ExecProcessResponse {
}
message ResizePtyRequest {
string container_id = 1;
string exec_id = 2;
uint32 width = 3;
uint32 height = 4;
}
message CloseIORequest {
string container_id = 1;
string exec_id = 2;
bool stdin = 3;
}
message PauseTaskRequest {
string container_id = 1;
}
message ResumeTaskRequest {
string container_id = 1;
}
message ListPidsRequest {
string container_id = 1;
}
message ListPidsResponse {
// Processes includes the process ID and additional process information
repeated containerd.v1.types.ProcessInfo processes = 1;
}
message CheckpointTaskRequest {
string container_id = 1;
string parent_checkpoint = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
google.protobuf.Any options = 3;
}
message CheckpointTaskResponse {
repeated containerd.types.Descriptor descriptors = 1;
}
message UpdateTaskRequest {
string container_id = 1;
google.protobuf.Any resources = 2;
}
message MetricsRequest {
repeated string filters = 1;
}
message MetricsResponse {
repeated types.Metric metrics = 1;
}
message WaitRequest {
string container_id = 1;
string exec_id = 2;
}
message WaitResponse {
uint32 exit_status = 1;
google.protobuf.Timestamp exited_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
}
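A hedged sketch of the Create/Start/Wait sequence; rootfs mounts and IO setup are elided, and field spellings such as ContainerID follow containerd's generator conventions (assumed here).

package main

import (
	"context"
	"fmt"

	tasksapi "github.com/containerd/containerd/api/services/tasks/v1"
	"google.golang.org/grpc"
)

// runTask creates a task for an existing container, starts it and waits for exit.
func runTask(ctx context.Context, conn *grpc.ClientConn, containerID string) (uint32, error) {
	client := tasksapi.NewTasksClient(conn)
	if _, err := client.Create(ctx, &tasksapi.CreateTaskRequest{
		ContainerID: containerID,
		Terminal:    false,
	}); err != nil {
		return 0, err
	}
	if _, err := client.Start(ctx, &tasksapi.StartRequest{ContainerID: containerID}); err != nil {
		return 0, err
	}
	resp, err := client.Wait(ctx, &tasksapi.WaitRequest{ContainerID: containerID})
	if err != nil {
		return 0, err
	}
	fmt.Println("exited at", resp.ExitedAt)
	return resp.ExitStatus, nil
}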

View file

@ -0,0 +1,446 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/containerd/containerd/api/services/version/v1/version.proto
/*
Package version is a generated protocol buffer package.
It is generated from these files:
github.com/containerd/containerd/api/services/version/v1/version.proto
It has these top-level messages:
VersionResponse
*/
package version
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import google_protobuf "github.com/gogo/protobuf/types"
// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
import context "golang.org/x/net/context"
import grpc "google.golang.org/grpc"
import strings "strings"
import reflect "reflect"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type VersionResponse struct {
Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
Revision string `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"`
}
func (m *VersionResponse) Reset() { *m = VersionResponse{} }
func (*VersionResponse) ProtoMessage() {}
func (*VersionResponse) Descriptor() ([]byte, []int) { return fileDescriptorVersion, []int{0} }
func init() {
proto.RegisterType((*VersionResponse)(nil), "containerd.services.version.v1.VersionResponse")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for Version service
type VersionClient interface {
Version(ctx context.Context, in *google_protobuf.Empty, opts ...grpc.CallOption) (*VersionResponse, error)
}
type versionClient struct {
cc *grpc.ClientConn
}
func NewVersionClient(cc *grpc.ClientConn) VersionClient {
return &versionClient{cc}
}
func (c *versionClient) Version(ctx context.Context, in *google_protobuf.Empty, opts ...grpc.CallOption) (*VersionResponse, error) {
out := new(VersionResponse)
err := grpc.Invoke(ctx, "/containerd.services.version.v1.Version/Version", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for Version service
type VersionServer interface {
Version(context.Context, *google_protobuf.Empty) (*VersionResponse, error)
}
func RegisterVersionServer(s *grpc.Server, srv VersionServer) {
s.RegisterService(&_Version_serviceDesc, srv)
}
func _Version_Version_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(google_protobuf.Empty)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(VersionServer).Version(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/containerd.services.version.v1.Version/Version",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(VersionServer).Version(ctx, req.(*google_protobuf.Empty))
}
return interceptor(ctx, in, info, handler)
}
var _Version_serviceDesc = grpc.ServiceDesc{
ServiceName: "containerd.services.version.v1.Version",
HandlerType: (*VersionServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Version",
Handler: _Version_Version_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "github.com/containerd/containerd/api/services/version/v1/version.proto",
}
func (m *VersionResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *VersionResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Version) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintVersion(dAtA, i, uint64(len(m.Version)))
i += copy(dAtA[i:], m.Version)
}
if len(m.Revision) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintVersion(dAtA, i, uint64(len(m.Revision)))
i += copy(dAtA[i:], m.Revision)
}
return i, nil
}
func encodeVarintVersion(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *VersionResponse) Size() (n int) {
var l int
_ = l
l = len(m.Version)
if l > 0 {
n += 1 + l + sovVersion(uint64(l))
}
l = len(m.Revision)
if l > 0 {
n += 1 + l + sovVersion(uint64(l))
}
return n
}
func sovVersion(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozVersion(x uint64) (n int) {
return sovVersion(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *VersionResponse) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&VersionResponse{`,
`Version:` + fmt.Sprintf("%v", this.Version) + `,`,
`Revision:` + fmt.Sprintf("%v", this.Revision) + `,`,
`}`,
}, "")
return s
}
func valueToStringVersion(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *VersionResponse) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowVersion
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: VersionResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: VersionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowVersion
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthVersion
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Version = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowVersion
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthVersion
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Revision = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipVersion(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthVersion
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipVersion(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowVersion
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowVersion
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowVersion
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthVersion
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowVersion
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipVersion(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthVersion = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowVersion = fmt.Errorf("proto: integer overflow")
)
func init() {
proto.RegisterFile("github.com/containerd/containerd/api/services/version/v1/version.proto", fileDescriptorVersion)
}
var fileDescriptorVersion = []byte{
// 243 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4b, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb,
0x97, 0xa5, 0x16, 0x15, 0x67, 0xe6, 0xe7, 0xe9, 0x97, 0x19, 0xc2, 0x98, 0x7a, 0x05, 0x45, 0xf9,
0x25, 0xf9, 0x42, 0x72, 0x08, 0x1d, 0x7a, 0x30, 0xd5, 0x7a, 0x30, 0x25, 0x65, 0x86, 0x52, 0xd2,
0xe9, 0xf9, 0xf9, 0xe9, 0x39, 0xa9, 0xfa, 0x60, 0xd5, 0x49, 0xa5, 0x69, 0xfa, 0xa9, 0xb9, 0x05,
0x25, 0x95, 0x10, 0xcd, 0x52, 0x22, 0xe9, 0xf9, 0xe9, 0xf9, 0x60, 0xa6, 0x3e, 0x88, 0x05, 0x11,
0x55, 0x72, 0xe7, 0xe2, 0x0f, 0x83, 0x18, 0x10, 0x94, 0x5a, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x2a,
0x24, 0xc1, 0xc5, 0x0e, 0x35, 0x53, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08, 0xc6, 0x15, 0x92,
0xe2, 0xe2, 0x28, 0x4a, 0x2d, 0xcb, 0x04, 0x4b, 0x31, 0x81, 0xa5, 0xe0, 0x7c, 0xa3, 0x58, 0x2e,
0x76, 0xa8, 0x41, 0x42, 0x41, 0x08, 0xa6, 0x98, 0x1e, 0xc4, 0x49, 0x7a, 0x30, 0x27, 0xe9, 0xb9,
0x82, 0x9c, 0x24, 0xa5, 0xaf, 0x87, 0xdf, 0x2b, 0x7a, 0x68, 0x8e, 0x72, 0x8a, 0x3a, 0xf1, 0x50,
0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78,
0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x63, 0x94, 0x03, 0xb9, 0x81, 0x6b, 0x0d, 0x65, 0x46, 0x30,
0x26, 0xb1, 0x81, 0x9d, 0x67, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x95, 0x0d, 0x52, 0x23, 0xa9,
0x01, 0x00, 0x00,
}
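
The encodeVarintVersion and sovVersion helpers above implement the standard protobuf base-128 varint that length-prefixes the version and revision strings. A self-contained sketch of the same framing, outside the generated package and with an illustrative value:

// Standalone sketch mirroring encodeVarintVersion above; the value is
// illustrative and the helper is not part of the generated package.
package main

import "fmt"

// putUvarint writes v as a base-128 varint: low 7 bits first, 0x80 as the
// continuation bit, exactly as encodeVarintVersion does.
func putUvarint(buf []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		buf[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	buf[offset] = uint8(v)
	return offset + 1
}

func main() {
	version := "v1.2.0"
	// Field 1, wire type 2 (length-delimited): tag byte 0x0a, varint length, then bytes.
	buf := make([]byte, 2+len(version))
	i := 0
	buf[i] = 0x0a
	i++
	i = putUvarint(buf, i, uint64(len(version)))
	i += copy(buf[i:], version)
	fmt.Printf("% x\n", buf[:i]) // 0a 06 76 31 2e 32 2e 30
}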

View file

@@ -0,0 +1,18 @@
syntax = "proto3";
package containerd.services.version.v1;
import "google/protobuf/empty.proto";
import weak "gogoproto/gogo.proto";
// TODO(stevvooe): Should version service actually be versioned?
option go_package = "github.com/containerd/containerd/api/services/version/v1;version";
service Version {
rpc Version(google.protobuf.Empty) returns (VersionResponse);
}
message VersionResponse {
string version = 1;
string revision = 2;
}
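
For reference, a minimal sketch of driving this service through the generated client shown earlier (NewVersionClient / Version). The dial target and options are illustrative assumptions; a real containerd client dials the daemon's unix socket with its own dialer appropriate to the vendored grpc-go version:

// Sketch only: exercising the generated VersionClient. The dial target and
// options are illustrative assumptions; NewVersionClient, Version, and the
// VersionResponse fields are the generated APIs shown above.
package main

import (
	"context"
	"fmt"
	"log"

	versionapi "github.com/containerd/containerd/api/services/version/v1"
	ptypes "github.com/gogo/protobuf/types"
	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("unix:///run/containerd/containerd.sock", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	resp, err := versionapi.NewVersionClient(conn).Version(context.Background(), &ptypes.Empty{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Version, resp.Revision)
}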

View file

@@ -0,0 +1,410 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/containerd/containerd/api/types/descriptor.proto
/*
Package types is a generated protocol buffer package.
It is generated from these files:
github.com/containerd/containerd/api/types/descriptor.proto
github.com/containerd/containerd/api/types/metrics.proto
github.com/containerd/containerd/api/types/mount.proto
github.com/containerd/containerd/api/types/platform.proto
It has these top-level messages:
Descriptor
Metric
Mount
Platform
*/
package types
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
import strings "strings"
import reflect "reflect"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
// Descriptor describes a blob in a content store.
//
// This descriptor can be used to reference content from an
// oci descriptor found in a manifest.
// See https://godoc.org/github.com/opencontainers/image-spec/specs-go/v1#Descriptor
type Descriptor struct {
MediaType string `protobuf:"bytes,1,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"`
Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"`
Size_ int64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"`
}
func (m *Descriptor) Reset() { *m = Descriptor{} }
func (*Descriptor) ProtoMessage() {}
func (*Descriptor) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{0} }
func init() {
proto.RegisterType((*Descriptor)(nil), "containerd.types.Descriptor")
}
func (m *Descriptor) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Descriptor) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.MediaType) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintDescriptor(dAtA, i, uint64(len(m.MediaType)))
i += copy(dAtA[i:], m.MediaType)
}
if len(m.Digest) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintDescriptor(dAtA, i, uint64(len(m.Digest)))
i += copy(dAtA[i:], m.Digest)
}
if m.Size_ != 0 {
dAtA[i] = 0x18
i++
i = encodeVarintDescriptor(dAtA, i, uint64(m.Size_))
}
return i, nil
}
func encodeVarintDescriptor(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *Descriptor) Size() (n int) {
var l int
_ = l
l = len(m.MediaType)
if l > 0 {
n += 1 + l + sovDescriptor(uint64(l))
}
l = len(m.Digest)
if l > 0 {
n += 1 + l + sovDescriptor(uint64(l))
}
if m.Size_ != 0 {
n += 1 + sovDescriptor(uint64(m.Size_))
}
return n
}
func sovDescriptor(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozDescriptor(x uint64) (n int) {
return sovDescriptor(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *Descriptor) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&Descriptor{`,
`MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`,
`Digest:` + fmt.Sprintf("%v", this.Digest) + `,`,
`Size_:` + fmt.Sprintf("%v", this.Size_) + `,`,
`}`,
}, "")
return s
}
func valueToStringDescriptor(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *Descriptor) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDescriptor
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Descriptor: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Descriptor: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field MediaType", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDescriptor
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthDescriptor
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.MediaType = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDescriptor
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthDescriptor
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType)
}
m.Size_ = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDescriptor
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Size_ |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipDescriptor(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthDescriptor
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipDescriptor(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowDescriptor
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowDescriptor
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowDescriptor
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthDescriptor
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowDescriptor
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipDescriptor(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthDescriptor = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowDescriptor = fmt.Errorf("proto: integer overflow")
)
func init() {
proto.RegisterFile("github.com/containerd/containerd/api/types/descriptor.proto", fileDescriptorDescriptor)
}
var fileDescriptorDescriptor = []byte{
// 234 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xa7, 0xa4, 0x16,
0x27, 0x17, 0x65, 0x16, 0x94, 0xe4, 0x17, 0xe9, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20,
0x94, 0xe9, 0x81, 0x95, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x25, 0xf5, 0x41, 0x2c, 0x88,
0x3a, 0xa5, 0x6e, 0x46, 0x2e, 0x2e, 0x17, 0xb8, 0x66, 0x21, 0x59, 0x2e, 0xae, 0xdc, 0xd4, 0x94,
0xcc, 0xc4, 0x78, 0x90, 0x1e, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x4e, 0xb0, 0x48, 0x48,
0x65, 0x41, 0xaa, 0x90, 0x17, 0x17, 0x5b, 0x4a, 0x66, 0x7a, 0x6a, 0x71, 0x89, 0x04, 0x13, 0x48,
0xca, 0xc9, 0xe8, 0xc4, 0x3d, 0x79, 0x86, 0x5b, 0xf7, 0xe4, 0xb5, 0x90, 0x9c, 0x9a, 0x5f, 0x90,
0x9a, 0x07, 0xb7, 0xbc, 0x58, 0x3f, 0x3d, 0x5f, 0x17, 0xa2, 0x45, 0xcf, 0x05, 0x4c, 0x05, 0x41,
0x4d, 0x10, 0x12, 0xe2, 0x62, 0x29, 0xce, 0xac, 0x4a, 0x95, 0x60, 0x56, 0x60, 0xd4, 0x60, 0x0e,
0x02, 0xb3, 0x9d, 0xbc, 0x4e, 0x3c, 0x94, 0x63, 0xb8, 0xf1, 0x50, 0x8e, 0xa1, 0xe1, 0x91, 0x1c,
0xe3, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x18, 0x65, 0x40,
0x7c, 0x60, 0x58, 0x83, 0xc9, 0x08, 0x86, 0x24, 0x36, 0xb0, 0x17, 0x8d, 0x01, 0x01, 0x00, 0x00,
0xff, 0xff, 0xea, 0xac, 0x78, 0x9a, 0x49, 0x01, 0x00, 0x00,
}

View file

@@ -0,0 +1,18 @@
syntax = "proto3";
package containerd.types;
import weak "gogoproto/gogo.proto";
option go_package = "github.com/containerd/containerd/api/types;types";
// Descriptor describes a blob in a content store.
//
// This descriptor can be used to reference content from an
// oci descriptor found in a manifest.
// See https://godoc.org/github.com/opencontainers/image-spec/specs-go/v1#Descriptor
message Descriptor {
string media_type = 1;
string digest = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
int64 size = 3;
}
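
A short sketch of building a Descriptor for a blob and round-tripping it through the generated Marshal/Unmarshal. The media type and payload are illustrative; digest.FromBytes comes from the go-digest module referenced by the customtype option above:

// Sketch only: constructing and round-tripping a Descriptor. The media type
// and payload are illustrative, not taken from this diff.
package main

import (
	"fmt"
	"log"

	"github.com/containerd/containerd/api/types"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	blob := []byte(`{"layers":[]}`)
	desc := types.Descriptor{
		MediaType: "application/vnd.oci.image.manifest.v1+json",
		Digest:    digest.FromBytes(blob), // content-addressed reference to the blob
		Size_:     int64(len(blob)),
	}

	raw, err := desc.Marshal()
	if err != nil {
		log.Fatal(err)
	}

	var decoded types.Descriptor
	if err := decoded.Unmarshal(raw); err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded.MediaType, decoded.Digest, decoded.Size_)
}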

View file

@@ -0,0 +1,17 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package types

View file

@@ -0,0 +1,412 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/containerd/containerd/api/types/metrics.proto
package types
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
import google_protobuf1 "github.com/gogo/protobuf/types"
import _ "github.com/gogo/protobuf/types"
import time "time"
import types1 "github.com/gogo/protobuf/types"
import strings "strings"
import reflect "reflect"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
var _ = time.Kitchen
type Metric struct {
Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,stdtime" json:"timestamp"`
ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
Data *google_protobuf1.Any `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"`
}
func (m *Metric) Reset() { *m = Metric{} }
func (*Metric) ProtoMessage() {}
func (*Metric) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{0} }
func init() {
proto.RegisterType((*Metric)(nil), "containerd.types.Metric")
}
func (m *Metric) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Metric) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
dAtA[i] = 0xa
i++
i = encodeVarintMetrics(dAtA, i, uint64(types1.SizeOfStdTime(m.Timestamp)))
n1, err := types1.StdTimeMarshalTo(m.Timestamp, dAtA[i:])
if err != nil {
return 0, err
}
i += n1
if len(m.ID) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintMetrics(dAtA, i, uint64(len(m.ID)))
i += copy(dAtA[i:], m.ID)
}
if m.Data != nil {
dAtA[i] = 0x1a
i++
i = encodeVarintMetrics(dAtA, i, uint64(m.Data.Size()))
n2, err := m.Data.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n2
}
return i, nil
}
func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *Metric) Size() (n int) {
var l int
_ = l
l = types1.SizeOfStdTime(m.Timestamp)
n += 1 + l + sovMetrics(uint64(l))
l = len(m.ID)
if l > 0 {
n += 1 + l + sovMetrics(uint64(l))
}
if m.Data != nil {
l = m.Data.Size()
n += 1 + l + sovMetrics(uint64(l))
}
return n
}
func sovMetrics(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozMetrics(x uint64) (n int) {
return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *Metric) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&Metric{`,
`Timestamp:` + strings.Replace(strings.Replace(this.Timestamp.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`,
`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
`Data:` + strings.Replace(fmt.Sprintf("%v", this.Data), "Any", "google_protobuf1.Any", 1) + `,`,
`}`,
}, "")
return s
}
func valueToStringMetrics(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *Metric) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Metric: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := types1.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ID = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMetrics
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMetrics
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Data == nil {
m.Data = &google_protobuf1.Any{}
}
if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipMetrics(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthMetrics
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipMetrics(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMetrics
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMetrics
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMetrics
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthMetrics
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMetrics
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipMetrics(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow")
)
func init() {
proto.RegisterFile("github.com/containerd/containerd/api/types/metrics.proto", fileDescriptorMetrics)
}
var fileDescriptorMetrics = []byte{
// 258 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x48, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xe7, 0xa6, 0x96,
0x14, 0x65, 0x26, 0x17, 0xeb, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0xd4, 0xe8, 0x81,
0xe5, 0xa5, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x92, 0xfa, 0x20, 0x16, 0x44, 0x9d, 0x94, 0x64,
0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x3e, 0x98, 0x97, 0x54, 0x9a, 0xa6, 0x9f, 0x98, 0x57, 0x09,
0x95, 0x92, 0x47, 0x97, 0x2a, 0xc9, 0xcc, 0x4d, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0x80, 0x28, 0x50,
0xea, 0x63, 0xe4, 0x62, 0xf3, 0x05, 0xdb, 0x2a, 0xe4, 0xc4, 0xc5, 0x09, 0x97, 0x95, 0x60, 0x54,
0x60, 0xd4, 0xe0, 0x36, 0x92, 0xd2, 0x83, 0xe8, 0xd7, 0x83, 0xe9, 0xd7, 0x0b, 0x81, 0xa9, 0x70,
0xe2, 0x38, 0x71, 0x4f, 0x9e, 0x61, 0xc2, 0x7d, 0x79, 0xc6, 0x20, 0x84, 0x36, 0x21, 0x31, 0x2e,
0xa6, 0xcc, 0x14, 0x09, 0x26, 0x05, 0x46, 0x0d, 0x4e, 0x27, 0xb6, 0x47, 0xf7, 0xe4, 0x99, 0x3c,
0x5d, 0x82, 0x98, 0x32, 0x53, 0x84, 0x34, 0xb8, 0x58, 0x52, 0x12, 0x4b, 0x12, 0x25, 0x98, 0xc1,
0xc6, 0x8a, 0x60, 0x18, 0xeb, 0x98, 0x57, 0x19, 0x04, 0x56, 0xe1, 0xe4, 0x75, 0xe2, 0xa1, 0x1c,
0xc3, 0x8d, 0x87, 0x72, 0x0c, 0x0d, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48,
0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x28, 0x03, 0xe2, 0x03, 0xd2, 0x1a, 0x4c, 0x46, 0x30, 0x24,
0xb1, 0x81, 0x6d, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xde, 0x0d, 0x02, 0xfe, 0x85, 0x01,
0x00, 0x00,
}

View file

@@ -0,0 +1,15 @@
syntax = "proto3";
package containerd.types;
import weak "gogoproto/gogo.proto";
import "google/protobuf/any.proto";
import "google/protobuf/timestamp.proto";
option go_package = "github.com/containerd/containerd/api/types;types";
message Metric {
google.protobuf.Timestamp timestamp = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
string id = 2;
google.protobuf.Any data = 3;
}
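
A sketch of filling in a Metric and round-tripping it with the generated code. The id and the Any payload are illustrative; real runtimes pack a typed, platform-specific stats message into data, which is outside this hunk:

// Sketch only: a Metric envelope with an opaque Any payload. The id, type URL
// and value bytes are illustrative placeholders.
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/containerd/containerd/api/types"
	ptypes "github.com/gogo/protobuf/types"
)

func main() {
	m := types.Metric{
		Timestamp: time.Now(),     // stdtime: plain time.Time, not a pointer
		ID:        "my-container", // illustrative task/container id
		Data: &ptypes.Any{ // opaque, platform-specific payload
			TypeUrl: "example.com/fake.Stats", // hypothetical type URL
			Value:   []byte("..."),
		},
	}

	raw, err := m.Marshal()
	if err != nil {
		log.Fatal(err)
	}

	var decoded types.Metric
	if err := decoded.Unmarshal(raw); err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded.ID, decoded.Timestamp.Format(time.RFC3339), decoded.Data.TypeUrl)
}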

View file

@@ -0,0 +1,456 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/containerd/containerd/api/types/mount.proto
package types
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
import strings "strings"
import reflect "reflect"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// Mount describes mounts for a container.
//
// This type is the lingua franca of ContainerD. All services provide mounts
// to be used with the container at creation time.
//
// The Mount type follows the structure of the mount syscall, including a type,
// source, target and options.
type Mount struct {
// Type defines the nature of the mount.
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
// Source specifies the name of the mount. Depending on mount type, this
// may be a volume name or a host path, or even ignored.
Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"`
// Target path in container
Target string `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"`
// Options specifies zero or more fstab style mount options.
Options []string `protobuf:"bytes,4,rep,name=options" json:"options,omitempty"`
}
func (m *Mount) Reset() { *m = Mount{} }
func (*Mount) ProtoMessage() {}
func (*Mount) Descriptor() ([]byte, []int) { return fileDescriptorMount, []int{0} }
func init() {
proto.RegisterType((*Mount)(nil), "containerd.types.Mount")
}
func (m *Mount) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Mount) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Type) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintMount(dAtA, i, uint64(len(m.Type)))
i += copy(dAtA[i:], m.Type)
}
if len(m.Source) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintMount(dAtA, i, uint64(len(m.Source)))
i += copy(dAtA[i:], m.Source)
}
if len(m.Target) > 0 {
dAtA[i] = 0x1a
i++
i = encodeVarintMount(dAtA, i, uint64(len(m.Target)))
i += copy(dAtA[i:], m.Target)
}
if len(m.Options) > 0 {
for _, s := range m.Options {
dAtA[i] = 0x22
i++
l = len(s)
for l >= 1<<7 {
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
l >>= 7
i++
}
dAtA[i] = uint8(l)
i++
i += copy(dAtA[i:], s)
}
}
return i, nil
}
func encodeVarintMount(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *Mount) Size() (n int) {
var l int
_ = l
l = len(m.Type)
if l > 0 {
n += 1 + l + sovMount(uint64(l))
}
l = len(m.Source)
if l > 0 {
n += 1 + l + sovMount(uint64(l))
}
l = len(m.Target)
if l > 0 {
n += 1 + l + sovMount(uint64(l))
}
if len(m.Options) > 0 {
for _, s := range m.Options {
l = len(s)
n += 1 + l + sovMount(uint64(l))
}
}
return n
}
func sovMount(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozMount(x uint64) (n int) {
return sovMount(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *Mount) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&Mount{`,
`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
`Source:` + fmt.Sprintf("%v", this.Source) + `,`,
`Target:` + fmt.Sprintf("%v", this.Target) + `,`,
`Options:` + fmt.Sprintf("%v", this.Options) + `,`,
`}`,
}, "")
return s
}
func valueToStringMount(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *Mount) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMount
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Mount: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Mount: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMount
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthMount
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Type = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMount
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthMount
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Source = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMount
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthMount
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Target = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMount
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthMount
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Options = append(m.Options, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipMount(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthMount
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipMount(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMount
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMount
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMount
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthMount
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMount
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipMount(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthMount = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowMount = fmt.Errorf("proto: integer overflow")
)
func init() {
proto.RegisterFile("github.com/containerd/containerd/api/types/mount.proto", fileDescriptorMount)
}
var fileDescriptorMount = []byte{
// 202 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x4b, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xe7, 0xe6, 0x97,
0xe6, 0x95, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0x54, 0xe8, 0x81, 0x65, 0xa5,
0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x92, 0xfa, 0x20, 0x16, 0x44, 0x9d, 0x52, 0x2a, 0x17, 0xab,
0x2f, 0x48, 0x9b, 0x90, 0x10, 0x17, 0x0b, 0x48, 0x9d, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10,
0x98, 0x2d, 0x24, 0xc6, 0xc5, 0x56, 0x9c, 0x5f, 0x5a, 0x94, 0x9c, 0x2a, 0xc1, 0x04, 0x16, 0x85,
0xf2, 0x40, 0xe2, 0x25, 0x89, 0x45, 0xe9, 0xa9, 0x25, 0x12, 0xcc, 0x10, 0x71, 0x08, 0x4f, 0x48,
0x82, 0x8b, 0x3d, 0xbf, 0xa0, 0x24, 0x33, 0x3f, 0xaf, 0x58, 0x82, 0x45, 0x81, 0x59, 0x83, 0x33,
0x08, 0xc6, 0x75, 0xf2, 0x3a, 0xf1, 0x50, 0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72,
0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x63, 0x94, 0x01,
0xf1, 0x1e, 0xb4, 0x06, 0x93, 0x11, 0x0c, 0x49, 0x6c, 0x60, 0xb7, 0x1b, 0x03, 0x02, 0x00, 0x00,
0xff, 0xff, 0x82, 0x1c, 0x02, 0x18, 0x1d, 0x01, 0x00, 0x00,
}

View file

@@ -0,0 +1,29 @@
syntax = "proto3";
package containerd.types;
import weak "gogoproto/gogo.proto";
option go_package = "github.com/containerd/containerd/api/types;types";
// Mount describes mounts for a container.
//
// This type is the lingua franca of ContainerD. All services provide mounts
// to be used with the container at creation time.
//
// The Mount type follows the structure of the mount syscall, including a type,
// source, target and options.
message Mount {
// Type defines the nature of the mount.
string type = 1;
// Source specifies the name of the mount. Depending on mount type, this
// may be a volume name or a host path, or even ignored.
string source = 2;
// Target path in container
string target = 3;
// Options specifies zero or more fstab style mount options.
repeated string options = 4;
}
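
Since Mount mirrors the mount(2) arguments (type, source, target, fstab-style options), here is a sketch of a bind-mount description; the paths and options are illustrative, and actually performing the mount is left to the caller:

// Sketch only: a bind-mount description built from the generated type. Paths
// and options are illustrative; the actual mount(2) call is the caller's job.
package main

import (
	"fmt"
	"strings"

	"github.com/containerd/containerd/api/types"
)

func main() {
	m := types.Mount{
		Type:    "bind",
		Source:  "/var/lib/containerd/example/rootfs", // illustrative host path
		Target:  "/mnt",                               // path inside the container
		Options: []string{"rbind", "ro"},              // fstab-style options
	}
	fmt.Printf("mount -t %s -o %s %s %s\n",
		m.Type, strings.Join(m.Options, ","), m.Source, m.Target)
}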

View file

@@ -0,0 +1,394 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/containerd/containerd/api/types/platform.proto
package types
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
import strings "strings"
import reflect "reflect"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// Platform follows the structure of the OCI platform specification, from
// descriptors.
type Platform struct {
OS string `protobuf:"bytes,1,opt,name=os,proto3" json:"os,omitempty"`
Architecture string `protobuf:"bytes,2,opt,name=architecture,proto3" json:"architecture,omitempty"`
Variant string `protobuf:"bytes,3,opt,name=variant,proto3" json:"variant,omitempty"`
}
func (m *Platform) Reset() { *m = Platform{} }
func (*Platform) ProtoMessage() {}
func (*Platform) Descriptor() ([]byte, []int) { return fileDescriptorPlatform, []int{0} }
func init() {
proto.RegisterType((*Platform)(nil), "containerd.types.Platform")
}
func (m *Platform) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Platform) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.OS) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintPlatform(dAtA, i, uint64(len(m.OS)))
i += copy(dAtA[i:], m.OS)
}
if len(m.Architecture) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintPlatform(dAtA, i, uint64(len(m.Architecture)))
i += copy(dAtA[i:], m.Architecture)
}
if len(m.Variant) > 0 {
dAtA[i] = 0x1a
i++
i = encodeVarintPlatform(dAtA, i, uint64(len(m.Variant)))
i += copy(dAtA[i:], m.Variant)
}
return i, nil
}
func encodeVarintPlatform(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *Platform) Size() (n int) {
var l int
_ = l
l = len(m.OS)
if l > 0 {
n += 1 + l + sovPlatform(uint64(l))
}
l = len(m.Architecture)
if l > 0 {
n += 1 + l + sovPlatform(uint64(l))
}
l = len(m.Variant)
if l > 0 {
n += 1 + l + sovPlatform(uint64(l))
}
return n
}
func sovPlatform(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozPlatform(x uint64) (n int) {
return sovPlatform(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *Platform) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&Platform{`,
`OS:` + fmt.Sprintf("%v", this.OS) + `,`,
`Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`,
`Variant:` + fmt.Sprintf("%v", this.Variant) + `,`,
`}`,
}, "")
return s
}
func valueToStringPlatform(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *Platform) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPlatform
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Platform: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Platform: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field OS", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPlatform
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthPlatform
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.OS = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Architecture", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPlatform
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthPlatform
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Architecture = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Variant", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowPlatform
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthPlatform
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Variant = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipPlatform(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthPlatform
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipPlatform(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowPlatform
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowPlatform
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowPlatform
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthPlatform
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowPlatform
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipPlatform(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthPlatform = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowPlatform = fmt.Errorf("proto: integer overflow")
)
func init() {
proto.RegisterFile("github.com/containerd/containerd/api/types/platform.proto", fileDescriptorPlatform)
}
var fileDescriptorPlatform = []byte{
// 205 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9,
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0x17, 0xe4, 0x24,
0x96, 0xa4, 0xe5, 0x17, 0xe5, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0x14, 0xe9,
0x81, 0x15, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x25, 0xf5, 0x41, 0x2c, 0x88, 0x3a, 0xa5,
0x04, 0x2e, 0x8e, 0x00, 0xa8, 0x4e, 0x21, 0x31, 0x2e, 0xa6, 0xfc, 0x62, 0x09, 0x46, 0x05, 0x46,
0x0d, 0x4e, 0x27, 0xb6, 0x47, 0xf7, 0xe4, 0x99, 0xfc, 0x83, 0x83, 0x98, 0xf2, 0x8b, 0x85, 0x94,
0xb8, 0x78, 0x12, 0x8b, 0x92, 0x33, 0x32, 0x4b, 0x52, 0x93, 0x4b, 0x4a, 0x8b, 0x52, 0x25, 0x98,
0x40, 0x2a, 0x82, 0x50, 0xc4, 0x84, 0x24, 0xb8, 0xd8, 0xcb, 0x12, 0x8b, 0x32, 0x13, 0xf3, 0x4a,
0x24, 0x98, 0xc1, 0xd2, 0x30, 0xae, 0x93, 0xd7, 0x89, 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31,
0x34, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4,
0x18, 0xa3, 0x0c, 0x88, 0xf7, 0x9e, 0x35, 0x98, 0x8c, 0x60, 0x48, 0x62, 0x03, 0x3b, 0xdb, 0x18,
0x10, 0x00, 0x00, 0xff, 0xff, 0x05, 0xaa, 0xda, 0xa1, 0x1b, 0x01, 0x00, 0x00,
}

View file

@@ -0,0 +1,15 @@
syntax = "proto3";
package containerd.types;
import weak "gogoproto/gogo.proto";
option go_package = "github.com/containerd/containerd/api/types;types";
// Platform follows the structure of the OCI platform specification, from
// descriptors.
message Platform {
string os = 1 [(gogoproto.customname) = "OS"];
string architecture = 2;
string variant = 3;
}
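
Note the customname option: the generated Go field is OS rather than Os. A quick sketch describing the host platform; runtime.GOOS/GOARCH line up with OCI platform strings for the common cases, and variant normalization is out of scope here:

// Sketch only: describing the host platform with the generated type.
package main

import (
	"fmt"
	"runtime"

	"github.com/containerd/containerd/api/types"
)

func main() {
	p := types.Platform{
		OS:           runtime.GOOS, // customname: the field is OS, not Os
		Architecture: runtime.GOARCH,
	}
	fmt.Printf("%s/%s variant=%q\n", p.OS, p.Architecture, p.Variant)
}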

View file

@@ -0,0 +1,890 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: github.com/containerd/containerd/api/types/task/task.proto
/*
Package task is a generated protocol buffer package.
It is generated from these files:
github.com/containerd/containerd/api/types/task/task.proto
It has these top-level messages:
Process
ProcessInfo
*/
package task
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto"
import _ "github.com/gogo/protobuf/types"
import google_protobuf2 "github.com/gogo/protobuf/types"
import time "time"
import types "github.com/gogo/protobuf/types"
import strings "strings"
import reflect "reflect"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
var _ = time.Kitchen
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type Status int32
const (
StatusUnknown Status = 0
StatusCreated Status = 1
StatusRunning Status = 2
StatusStopped Status = 3
StatusPaused Status = 4
StatusPausing Status = 5
)
var Status_name = map[int32]string{
0: "UNKNOWN",
1: "CREATED",
2: "RUNNING",
3: "STOPPED",
4: "PAUSED",
5: "PAUSING",
}
var Status_value = map[string]int32{
"UNKNOWN": 0,
"CREATED": 1,
"RUNNING": 2,
"STOPPED": 3,
"PAUSED": 4,
"PAUSING": 5,
}
func (x Status) String() string {
return proto.EnumName(Status_name, int32(x))
}
func (Status) EnumDescriptor() ([]byte, []int) { return fileDescriptorTask, []int{0} }
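
Status.String goes through proto.EnumName with the Status_name table above, so the constants print as their wire names. A tiny sketch, assuming the github.com/containerd/containerd/api/types/task import path for this package:

// Sketch only: the Status constants stringify via the Status_name table above.
package main

import (
	"fmt"

	"github.com/containerd/containerd/api/types/task" // assumed import path
)

func main() {
	fmt.Println(task.StatusRunning) // prints "RUNNING"
	fmt.Println(task.StatusStopped) // prints "STOPPED"
}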
type Process struct {
ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"`
Status Status `protobuf:"varint,4,opt,name=status,proto3,enum=containerd.v1.types.Status" json:"status,omitempty"`
Stdin string `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"`
Stdout string `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"`
Stderr string `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"`
Terminal bool `protobuf:"varint,8,opt,name=terminal,proto3" json:"terminal,omitempty"`
ExitStatus uint32 `protobuf:"varint,9,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"`
ExitedAt time.Time `protobuf:"bytes,10,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"`
}
func (m *Process) Reset() { *m = Process{} }
func (*Process) ProtoMessage() {}
func (*Process) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{0} }
type ProcessInfo struct {
// PID is the process ID.
Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"`
// Info contains additional process information.
//
// Info varies by platform.
Info *google_protobuf2.Any `protobuf:"bytes,2,opt,name=info" json:"info,omitempty"`
}
func (m *ProcessInfo) Reset() { *m = ProcessInfo{} }
func (*ProcessInfo) ProtoMessage() {}
func (*ProcessInfo) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{1} }
func init() {
proto.RegisterType((*Process)(nil), "containerd.v1.types.Process")
proto.RegisterType((*ProcessInfo)(nil), "containerd.v1.types.ProcessInfo")
proto.RegisterEnum("containerd.v1.types.Status", Status_name, Status_value)
}
func (m *Process) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Process) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.ContainerID) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
i += copy(dAtA[i:], m.ContainerID)
}
if len(m.ID) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintTask(dAtA, i, uint64(len(m.ID)))
i += copy(dAtA[i:], m.ID)
}
if m.Pid != 0 {
dAtA[i] = 0x18
i++
i = encodeVarintTask(dAtA, i, uint64(m.Pid))
}
if m.Status != 0 {
dAtA[i] = 0x20
i++
i = encodeVarintTask(dAtA, i, uint64(m.Status))
}
if len(m.Stdin) > 0 {
dAtA[i] = 0x2a
i++
i = encodeVarintTask(dAtA, i, uint64(len(m.Stdin)))
i += copy(dAtA[i:], m.Stdin)
}
if len(m.Stdout) > 0 {
dAtA[i] = 0x32
i++
i = encodeVarintTask(dAtA, i, uint64(len(m.Stdout)))
i += copy(dAtA[i:], m.Stdout)
}
if len(m.Stderr) > 0 {
dAtA[i] = 0x3a
i++
i = encodeVarintTask(dAtA, i, uint64(len(m.Stderr)))
i += copy(dAtA[i:], m.Stderr)
}
if m.Terminal {
dAtA[i] = 0x40
i++
if m.Terminal {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i++
}
if m.ExitStatus != 0 {
dAtA[i] = 0x48
i++
i = encodeVarintTask(dAtA, i, uint64(m.ExitStatus))
}
dAtA[i] = 0x52
i++
i = encodeVarintTask(dAtA, i, uint64(types.SizeOfStdTime(m.ExitedAt)))
n1, err := types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
if err != nil {
return 0, err
}
i += n1
return i, nil
}
func (m *ProcessInfo) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ProcessInfo) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Pid != 0 {
dAtA[i] = 0x8
i++
i = encodeVarintTask(dAtA, i, uint64(m.Pid))
}
if m.Info != nil {
dAtA[i] = 0x12
i++
i = encodeVarintTask(dAtA, i, uint64(m.Info.Size()))
n2, err := m.Info.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n2
}
return i, nil
}
func encodeVarintTask(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *Process) Size() (n int) {
var l int
_ = l
l = len(m.ContainerID)
if l > 0 {
n += 1 + l + sovTask(uint64(l))
}
l = len(m.ID)
if l > 0 {
n += 1 + l + sovTask(uint64(l))
}
if m.Pid != 0 {
n += 1 + sovTask(uint64(m.Pid))
}
if m.Status != 0 {
n += 1 + sovTask(uint64(m.Status))
}
l = len(m.Stdin)
if l > 0 {
n += 1 + l + sovTask(uint64(l))
}
l = len(m.Stdout)
if l > 0 {
n += 1 + l + sovTask(uint64(l))
}
l = len(m.Stderr)
if l > 0 {
n += 1 + l + sovTask(uint64(l))
}
if m.Terminal {
n += 2
}
if m.ExitStatus != 0 {
n += 1 + sovTask(uint64(m.ExitStatus))
}
l = types.SizeOfStdTime(m.ExitedAt)
n += 1 + l + sovTask(uint64(l))
return n
}
func (m *ProcessInfo) Size() (n int) {
var l int
_ = l
if m.Pid != 0 {
n += 1 + sovTask(uint64(m.Pid))
}
if m.Info != nil {
l = m.Info.Size()
n += 1 + l + sovTask(uint64(l))
}
return n
}
func sovTask(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozTask(x uint64) (n int) {
return sovTask(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *Process) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&Process{`,
`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
`Status:` + fmt.Sprintf("%v", this.Status) + `,`,
`Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`,
`Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`,
`Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`,
`Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`,
`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf1.Timestamp", 1), `&`, ``, 1) + `,`,
`}`,
}, "")
return s
}
func (this *ProcessInfo) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&ProcessInfo{`,
`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
`Info:` + strings.Replace(fmt.Sprintf("%v", this.Info), "Any", "google_protobuf2.Any", 1) + `,`,
`}`,
}, "")
return s
}
func valueToStringTask(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *Process) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTask
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Process: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Process: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTask
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthTask
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ContainerID = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTask
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthTask
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ID = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType)
}
m.Pid = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTask
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Pid |= (uint32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 4:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
}
m.Status = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTask
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Status |= (Status(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTask
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthTask
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Stdin = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 6:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTask
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthTask
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Stdout = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 7:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTask
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthTask
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Stderr = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 8:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTask
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
m.Terminal = bool(v != 0)
case 9:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType)
}
m.ExitStatus = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTask
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.ExitStatus |= (uint32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 10:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTask
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthTask
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if err := types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipTask(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthTask
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ProcessInfo) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTask
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ProcessInfo: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ProcessInfo: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType)
}
m.Pid = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTask
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Pid |= (uint32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTask
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthTask
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Info == nil {
m.Info = &google_protobuf2.Any{}
}
if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipTask(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthTask
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipTask(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTask
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTask
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTask
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthTask
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTask
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipTask(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthTask = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowTask = fmt.Errorf("proto: integer overflow")
)
func init() {
proto.RegisterFile("github.com/containerd/containerd/api/types/task/task.proto", fileDescriptorTask)
}
var fileDescriptorTask = []byte{
// 545 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x3f, 0x6f, 0xd3, 0x40,
0x18, 0xc6, 0x7d, 0x6e, 0xeb, 0xa6, 0xe7, 0xb6, 0x18, 0x13, 0x55, 0xc6, 0x20, 0xdb, 0xea, 0x64,
0x31, 0xd8, 0x22, 0xdd, 0xd8, 0xf2, 0x4f, 0xc8, 0x42, 0x72, 0x23, 0x27, 0x11, 0x6c, 0x91, 0x13,
0x5f, 0xcc, 0xa9, 0xcd, 0x9d, 0x65, 0x9f, 0x81, 0x6c, 0x8c, 0xa8, 0x13, 0x5f, 0xa0, 0x13, 0x7c,
0x0a, 0x3e, 0x41, 0x46, 0x26, 0xc4, 0x14, 0xa8, 0x3f, 0x09, 0x3a, 0xdb, 0x49, 0x23, 0x60, 0x39,
0xbd, 0xef, 0xf3, 0x7b, 0xee, 0xbd, 0xf7, 0x1e, 0xf8, 0x22, 0xc6, 0xec, 0x6d, 0x3e, 0x75, 0x66,
0x74, 0xe1, 0xce, 0x28, 0x61, 0x21, 0x26, 0x28, 0x8d, 0x76, 0xcb, 0x30, 0xc1, 0x2e, 0x5b, 0x26,
0x28, 0x73, 0x59, 0x98, 0x5d, 0x95, 0x87, 0x93, 0xa4, 0x94, 0x51, 0xf5, 0xd1, 0xbd, 0xcb, 0x79,
0xf7, 0xdc, 0x29, 0x4d, 0x7a, 0x33, 0xa6, 0x31, 0x2d, 0xb9, 0xcb, 0xab, 0xca, 0xaa, 0x9b, 0x31,
0xa5, 0xf1, 0x35, 0x72, 0xcb, 0x6e, 0x9a, 0xcf, 0x5d, 0x86, 0x17, 0x28, 0x63, 0xe1, 0x22, 0xa9,
0x0d, 0x8f, 0xff, 0x36, 0x84, 0x64, 0x59, 0xa1, 0xf3, 0x42, 0x84, 0x87, 0x83, 0x94, 0xce, 0x50,
0x96, 0xa9, 0x2d, 0x78, 0xbc, 0x7d, 0x74, 0x82, 0x23, 0x0d, 0x58, 0xc0, 0x3e, 0xea, 0x3c, 0x28,
0xd6, 0xa6, 0xdc, 0xdd, 0xe8, 0x5e, 0x2f, 0x90, 0xb7, 0x26, 0x2f, 0x52, 0xcf, 0xa0, 0x88, 0x23,
0x4d, 0x2c, 0x9d, 0x52, 0xb1, 0x36, 0x45, 0xaf, 0x17, 0x88, 0x38, 0x52, 0x15, 0xb8, 0x97, 0xe0,
0x48, 0xdb, 0xb3, 0x80, 0x7d, 0x12, 0xf0, 0x52, 0xbd, 0x80, 0x52, 0xc6, 0x42, 0x96, 0x67, 0xda,
0xbe, 0x05, 0xec, 0xd3, 0xd6, 0x13, 0xe7, 0x3f, 0x3f, 0x74, 0x86, 0xa5, 0x25, 0xa8, 0xad, 0x6a,
0x13, 0x1e, 0x64, 0x2c, 0xc2, 0x44, 0x3b, 0xe0, 0x2f, 0x04, 0x55, 0xa3, 0x9e, 0xf1, 0x51, 0x11,
0xcd, 0x99, 0x26, 0x95, 0x72, 0xdd, 0xd5, 0x3a, 0x4a, 0x53, 0xed, 0x70, 0xab, 0xa3, 0x34, 0x55,
0x75, 0xd8, 0x60, 0x28, 0x5d, 0x60, 0x12, 0x5e, 0x6b, 0x0d, 0x0b, 0xd8, 0x8d, 0x60, 0xdb, 0xab,
0x26, 0x94, 0xd1, 0x07, 0xcc, 0x26, 0xf5, 0x6e, 0x47, 0xe5, 0xc2, 0x90, 0x4b, 0xd5, 0x2a, 0x6a,
0x1b, 0x1e, 0xf1, 0x0e, 0x45, 0x93, 0x90, 0x69, 0xd0, 0x02, 0xb6, 0xdc, 0xd2, 0x9d, 0x2a, 0x50,
0x67, 0x13, 0xa8, 0x33, 0xda, 0x24, 0xde, 0x69, 0xac, 0xd6, 0xa6, 0xf0, 0xf9, 0x97, 0x09, 0x82,
0x46, 0x75, 0xad, 0xcd, 0xce, 0x3d, 0x28, 0xd7, 0x19, 0x7b, 0x64, 0x4e, 0x37, 0xd9, 0x80, 0xfb,
0x6c, 0x6c, 0xb8, 0x8f, 0xc9, 0x9c, 0x96, 0x39, 0xca, 0xad, 0xe6, 0x3f, 0xe3, 0xdb, 0x64, 0x19,
0x94, 0x8e, 0x67, 0x3f, 0x00, 0x94, 0xea, 0xc5, 0x0c, 0x78, 0x38, 0xf6, 0x5f, 0xf9, 0x97, 0xaf,
0x7d, 0x45, 0xd0, 0x1f, 0xde, 0xdc, 0x5a, 0x27, 0x15, 0x18, 0x93, 0x2b, 0x42, 0xdf, 0x13, 0xce,
0xbb, 0x41, 0xbf, 0x3d, 0xea, 0xf7, 0x14, 0xb0, 0xcb, 0xbb, 0x29, 0x0a, 0x19, 0x8a, 0x38, 0x0f,
0xc6, 0xbe, 0xef, 0xf9, 0x2f, 0x15, 0x71, 0x97, 0x07, 0x39, 0x21, 0x98, 0xc4, 0x9c, 0x0f, 0x47,
0x97, 0x83, 0x41, 0xbf, 0xa7, 0xec, 0xed, 0xf2, 0x21, 0xa3, 0x49, 0x82, 0x22, 0xf5, 0x29, 0x94,
0x06, 0xed, 0xf1, 0xb0, 0xdf, 0x53, 0xf6, 0x75, 0xe5, 0xe6, 0xd6, 0x3a, 0xae, 0xf0, 0x20, 0xcc,
0xb3, 0x6a, 0x3a, 0xa7, 0x7c, 0xfa, 0xc1, 0xee, 0x6d, 0x8e, 0x31, 0x89, 0xf5, 0xd3, 0x4f, 0x5f,
0x0c, 0xe1, 0xdb, 0x57, 0xa3, 0xfe, 0x4d, 0x47, 0x5b, 0xdd, 0x19, 0xc2, 0xcf, 0x3b, 0x43, 0xf8,
0x58, 0x18, 0x60, 0x55, 0x18, 0xe0, 0x7b, 0x61, 0x80, 0xdf, 0x85, 0x01, 0xde, 0x08, 0x53, 0xa9,
0x0c, 0xe2, 0xe2, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc3, 0x32, 0xd2, 0x86, 0x50, 0x03, 0x00,
0x00,
}


@@ -0,0 +1,41 @@
syntax = "proto3";
package containerd.v1.types;
import weak "gogoproto/gogo.proto";
import "google/protobuf/timestamp.proto";
import "google/protobuf/any.proto";
enum Status {
option (gogoproto.goproto_enum_prefix) = false;
option (gogoproto.enum_customname) = "Status";
UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "StatusUnknown"];
CREATED = 1 [(gogoproto.enumvalue_customname) = "StatusCreated"];
RUNNING = 2 [(gogoproto.enumvalue_customname) = "StatusRunning"];
STOPPED = 3 [(gogoproto.enumvalue_customname) = "StatusStopped"];
PAUSED = 4 [(gogoproto.enumvalue_customname) = "StatusPaused"];
PAUSING = 5 [(gogoproto.enumvalue_customname) = "StatusPausing"];
}
message Process {
string container_id = 1;
string id = 2;
uint32 pid = 3;
Status status = 4;
string stdin = 5;
string stdout = 6;
string stderr = 7;
bool terminal = 8;
uint32 exit_status = 9;
google.protobuf.Timestamp exited_at = 10 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
}
message ProcessInfo {
// PID is the process ID.
uint32 pid = 1;
// Info contains additional process information.
//
// Info varies by platform.
google.protobuf.Any info = 2;
}


@@ -0,0 +1,141 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package compression
import (
"bufio"
"bytes"
"compress/gzip"
"fmt"
"io"
"sync"
)
type (
// Compression is the state representing whether the data is compressed or not.
Compression int
)
const (
// Uncompressed represents the uncompressed state.
Uncompressed Compression = iota
// Gzip is the gzip compression algorithm.
Gzip
)
var (
bufioReader32KPool = &sync.Pool{
New: func() interface{} { return bufio.NewReaderSize(nil, 32*1024) },
}
)
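// readCloserWrapper turns an io.Reader into an io.ReadCloser by attaching an
// optional closer callback, used here to return pooled readers on Close.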
type readCloserWrapper struct {
io.Reader
closer func() error
}
func (r *readCloserWrapper) Close() error {
if r.closer != nil {
return r.closer()
}
return nil
}
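// writeCloserWrapper turns an io.Writer into an io.WriteCloser by attaching an
// optional closer callback.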
type writeCloserWrapper struct {
io.Writer
closer func() error
}
func (w *writeCloserWrapper) Close() error {
if w.closer != nil {
w.closer()
}
return nil
}
// DetectCompression detects the compression algorithm of the source.
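// A gzip stream is recognized by its leading magic bytes 0x1F 0x8B followed by
// the deflate method byte 0x08.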
func DetectCompression(source []byte) Compression {
for compression, m := range map[Compression][]byte{
Gzip: {0x1F, 0x8B, 0x08},
} {
if len(source) < len(m) {
// Len too short
continue
}
if bytes.Equal(m, source[:len(m)]) {
return compression
}
}
return Uncompressed
}
// DecompressStream decompresses the archive and returns a ReadCloser with the decompressed archive.
func DecompressStream(archive io.Reader) (io.ReadCloser, error) {
buf := bufioReader32KPool.Get().(*bufio.Reader)
buf.Reset(archive)
bs, err := buf.Peek(10)
if err != nil && err != io.EOF {
// Note: we'll ignore any io.EOF error because there are some odd
// cases where the layer.tar file will be empty (zero bytes) and
// that results in an io.EOF from the Peek() call. So, in those
// cases we'll just treat it as a non-compressed stream and
// that means just create an empty layer.
// See Issue docker/docker#18170
return nil, err
}
closer := func() error {
buf.Reset(nil)
bufioReader32KPool.Put(buf)
return nil
}
switch compression := DetectCompression(bs); compression {
case Uncompressed:
readBufWrapper := &readCloserWrapper{buf, closer}
return readBufWrapper, nil
case Gzip:
gzReader, err := gzip.NewReader(buf)
if err != nil {
return nil, err
}
readBufWrapper := &readCloserWrapper{gzReader, closer}
return readBufWrapper, nil
default:
return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension())
}
}
// CompressStream compresses the dest with the specified compression algorithm.
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
switch compression {
case Uncompressed:
return &writeCloserWrapper{dest, nil}, nil
case Gzip:
return gzip.NewWriter(dest), nil
default:
return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension())
}
}
// Extension returns the extension of a file that uses the specified compression algorithm.
func (compression *Compression) Extension() string {
switch *compression {
case Gzip:
return "gz"
}
return ""
}


@@ -0,0 +1,83 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package compression
import (
"bytes"
"crypto/rand"
"io/ioutil"
"testing"
)
// generateData generates data composed of 2 random parts
// and a single zero-filled part between them.
// Typically, the compression ratio would be about 67%.
func generateData(t *testing.T, size int) []byte {
part0 := size / 3 // random
part2 := size / 3 // random
part1 := size - part0 - part2 // zero-filled
part0Data := make([]byte, part0)
if _, err := rand.Read(part0Data); err != nil {
t.Fatal(err)
}
part1Data := make([]byte, part1)
part2Data := make([]byte, part2)
if _, err := rand.Read(part2Data); err != nil {
t.Fatal(err)
}
return append(part0Data, append(part1Data, part2Data...)...)
}
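// testCompressDecompress round-trips size bytes of generated data through
// CompressStream and DecompressStream and checks that the output matches the
// original.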
func testCompressDecompress(t *testing.T, size int, compression Compression) {
orig := generateData(t, size)
var b bytes.Buffer
compressor, err := CompressStream(&b, compression)
if err != nil {
t.Fatal(err)
}
if n, err := compressor.Write(orig); err != nil || n != size {
t.Fatal(err)
}
compressor.Close()
compressed := b.Bytes()
t.Logf("compressed %d bytes to %d bytes (%.2f%%)",
len(orig), len(compressed), 100.0*float32(len(compressed))/float32(len(orig)))
if compared := bytes.Compare(orig, compressed); (compression == Uncompressed && compared != 0) ||
(compression != Uncompressed && compared == 0) {
t.Fatal("strange compressed data")
}
decompressor, err := DecompressStream(bytes.NewReader(compressed))
if err != nil {
t.Fatal(err)
}
decompressed, err := ioutil.ReadAll(decompressor)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(orig, decompressed) {
t.Fatal("strange decompressed data")
}
}
func TestCompressDecompressGzip(t *testing.T) {
testCompressDecompress(t, 1024*1024, Gzip)
}
func TestCompressDecompressUncompressed(t *testing.T) {
testCompressDecompress(t, 1024*1024, Uncompressed)
}


@@ -0,0 +1,63 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package archive
import (
"bytes"
"context"
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
"github.com/containerd/containerd/archive/compression"
"github.com/containerd/containerd/testutil"
)
// TestPrefixHeaderReadable tests that files that could be created with the
// version of this package that was built with <=go17 are still readable.
func TestPrefixHeaderReadable(t *testing.T) {
testutil.RequiresRoot(t)
// https://gist.github.com/stevvooe/e2a790ad4e97425896206c0816e1a882#file-out-go
var testFile = []byte("\x1f\x8b\x08\x08\x44\x21\x68\x59\x00\x03\x74\x2e\x74\x61\x72\x00\x4b\xcb\xcf\x67\xa0\x35\x30\x80\x00\x86\x06\x10\x47\x01\xc1\x37\x40\x00\x54\xb6\xb1\xa1\xa9\x99\x09\x48\x25\x1d\x40\x69\x71\x49\x62\x91\x02\xe5\x76\xa1\x79\x84\x21\x91\xd6\x80\x72\xaf\x8f\x82\x51\x30\x0a\x46\x36\x00\x00\xf0\x1c\x1e\x95\x00\x06\x00\x00")
tmpDir, err := ioutil.TempDir("", "prefix-test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpDir)
r, err := compression.DecompressStream(bytes.NewReader(testFile))
if err != nil {
t.Fatal(err)
}
defer r.Close()
_, err = Apply(context.Background(), tmpDir, r)
if err != nil {
t.Fatal(err)
}
baseName := "foo"
pth := strings.Repeat("a", 100-len(baseName)) + "/" + baseName
_, err = os.Lstat(filepath.Join(tmpDir, pth))
if err != nil {
t.Fatal(err)
}
}


@@ -0,0 +1,68 @@
// +build windows
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package archive
import (
"strconv"
"strings"
"time"
"archive/tar"
)
// Forked from https://github.com/golang/go/blob/master/src/archive/tar/strconv.go
// as archive/tar doesn't support CreationTime, but does handle PAX time parsing,
// and there's no need to re-invent the wheel.
// parsePAXTime takes a string of the form %d.%d as described in the PAX
// specification. Note that this implementation allows for negative timestamps,
// which the PAX specification allows but which are not always portable.
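// For example, "1350244992.023960108" parses to 1350244992 seconds and
// 23960108 nanoseconds.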
func parsePAXTime(s string) (time.Time, error) {
const maxNanoSecondDigits = 9
// Split string into seconds and sub-seconds parts.
ss, sn := s, ""
if pos := strings.IndexByte(s, '.'); pos >= 0 {
ss, sn = s[:pos], s[pos+1:]
}
// Parse the seconds.
secs, err := strconv.ParseInt(ss, 10, 64)
if err != nil {
return time.Time{}, tar.ErrHeader
}
if len(sn) == 0 {
return time.Unix(secs, 0), nil // No sub-second values
}
// Parse the nanoseconds.
if strings.Trim(sn, "0123456789") != "" {
return time.Time{}, tar.ErrHeader
}
if len(sn) < maxNanoSecondDigits {
sn += strings.Repeat("0", maxNanoSecondDigits-len(sn)) // Right pad
} else {
sn = sn[:maxNanoSecondDigits] // Right truncate
}
nsecs, _ := strconv.ParseInt(sn, 10, 64) // Must succeed
if len(ss) > 0 && ss[0] == '-' {
return time.Unix(secs, -nsecs), nil // Negative correction
}
return time.Unix(secs, nsecs), nil
}

650 vendor/github.com/containerd/containerd/archive/tar.go generated vendored Normal file

@@ -0,0 +1,650 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package archive
import (
"archive/tar"
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
"syscall"
"time"
"github.com/containerd/containerd/log"
"github.com/containerd/continuity/fs"
"github.com/pkg/errors"
)
var bufPool = &sync.Pool{
New: func() interface{} {
buffer := make([]byte, 32*1024)
return &buffer
},
}
var errInvalidArchive = errors.New("invalid archive")
// Diff returns a tar stream of the computed filesystem
// difference between the provided directories.
//
// Produces a tar using OCI style file markers for deletions. Deleted
// files will be prepended with the prefix ".wh.". This style is
// based off AUFS whiteouts.
// See https://github.com/opencontainers/image-spec/blob/master/layer.md
func Diff(ctx context.Context, a, b string) io.ReadCloser {
r, w := io.Pipe()
go func() {
err := WriteDiff(ctx, w, a, b)
if err = w.CloseWithError(err); err != nil {
log.G(ctx).WithError(err).Debugf("closing tar pipe failed")
}
}()
return r
}
// WriteDiff writes a tar stream of the computed difference between the
// provided directories.
//
// Produces a tar using OCI style file markers for deletions. Deleted
// files will be prepended with the prefix ".wh.". This style is
// based off AUFS whiteouts.
// See https://github.com/opencontainers/image-spec/blob/master/layer.md
func WriteDiff(ctx context.Context, w io.Writer, a, b string) error {
cw := newChangeWriter(w, b)
err := fs.Changes(ctx, a, b, cw.HandleChange)
if err != nil {
return errors.Wrap(err, "failed to create diff tar stream")
}
return cw.Close()
}
const (
// whiteoutPrefix prefix means file is a whiteout. If this is followed by a
// filename this means that file has been removed from the base layer.
// See https://github.com/opencontainers/image-spec/blob/master/layer.md#whiteouts
whiteoutPrefix = ".wh."
// whiteoutMetaPrefix prefix means whiteout has a special meaning and is not
// for removing an actual file. Normally these files are excluded from exported
// archives.
whiteoutMetaPrefix = whiteoutPrefix + whiteoutPrefix
// whiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
// layers. Normally these should not go into exported archives and all changed
// hardlinks should be copied to the top layer.
whiteoutLinkDir = whiteoutMetaPrefix + "plnk"
// whiteoutOpaqueDir file means directory has been made opaque - meaning
// readdir calls to this directory do not follow to lower layers.
whiteoutOpaqueDir = whiteoutMetaPrefix + ".opq"
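// paxSchilyXattr is the prefix of PAX record keys used to carry extended
// attributes (xattrs) in the tar stream.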
paxSchilyXattr = "SCHILY.xattrs."
)
// Apply applies a tar stream of an OCI style diff tar.
// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets
func Apply(ctx context.Context, root string, r io.Reader, opts ...ApplyOpt) (int64, error) {
root = filepath.Clean(root)
var options ApplyOptions
for _, opt := range opts {
if err := opt(&options); err != nil {
return 0, errors.Wrap(err, "failed to apply option")
}
}
return apply(ctx, root, tar.NewReader(r), options)
}
// applyNaive applies a tar stream of an OCI style diff tar.
// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets
func applyNaive(ctx context.Context, root string, tr *tar.Reader, options ApplyOptions) (size int64, err error) {
var (
dirs []*tar.Header
// Used for handling opaque directory markers which
// may occur out of order
unpackedPaths = make(map[string]struct{})
// Used for aufs plink directory
aufsTempdir = ""
aufsHardlinks = make(map[string]*tar.Header)
)
// Iterate through the files in the archive.
for {
select {
case <-ctx.Done():
return 0, ctx.Err()
default:
}
hdr, err := tr.Next()
if err == io.EOF {
// end of tar archive
break
}
if err != nil {
return 0, err
}
size += hdr.Size
// Normalize name, for safety and for a simple is-root check
hdr.Name = filepath.Clean(hdr.Name)
if skipFile(hdr) {
log.G(ctx).Warnf("file %q ignored: archive may not be supported on system", hdr.Name)
continue
}
// Split name and resolve symlinks for root directory.
ppath, base := filepath.Split(hdr.Name)
ppath, err = fs.RootPath(root, ppath)
if err != nil {
return 0, errors.Wrap(err, "failed to get root path")
}
// Join to root before joining to parent path to ensure relative links are
// already resolved based on the root before adding to parent.
path := filepath.Join(ppath, filepath.Join("/", base))
if path == root {
log.G(ctx).Debugf("file %q ignored: resolved to root", hdr.Name)
continue
}
// If file is not directly under root, ensure parent directory
// exists or is created.
if ppath != root {
parentPath := ppath
if base == "" {
parentPath = filepath.Dir(path)
}
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
err = mkdirAll(parentPath, 0700)
if err != nil {
return 0, err
}
}
}
// Skip AUFS metadata dirs
if strings.HasPrefix(hdr.Name, whiteoutMetaPrefix) {
// Regular files inside /.wh..wh.plnk can be used as hardlink targets
// We don't want this directory, but we need the files in it so that
// such hardlinks can be resolved.
if strings.HasPrefix(hdr.Name, whiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
basename := filepath.Base(hdr.Name)
aufsHardlinks[basename] = hdr
if aufsTempdir == "" {
if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
return 0, err
}
defer os.RemoveAll(aufsTempdir)
}
p, err := fs.RootPath(aufsTempdir, basename)
if err != nil {
return 0, err
}
if err := createTarFile(ctx, p, root, hdr, tr); err != nil {
return 0, err
}
}
if hdr.Name != whiteoutOpaqueDir {
continue
}
}
if strings.HasPrefix(base, whiteoutPrefix) {
dir := filepath.Dir(path)
if base == whiteoutOpaqueDir {
_, err := os.Lstat(dir)
if err != nil {
return 0, err
}
err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
if os.IsNotExist(err) {
err = nil // parent was deleted
}
return err
}
if path == dir {
return nil
}
if _, exists := unpackedPaths[path]; !exists {
err := os.RemoveAll(path)
return err
}
return nil
})
if err != nil {
return 0, err
}
continue
}
originalBase := base[len(whiteoutPrefix):]
originalPath := filepath.Join(dir, originalBase)
// Ensure originalPath is under dir
if dir[len(dir)-1] != filepath.Separator {
dir += string(filepath.Separator)
}
if !strings.HasPrefix(originalPath, dir) {
return 0, errors.Wrapf(errInvalidArchive, "invalid whiteout name: %v", base)
}
if err := os.RemoveAll(originalPath); err != nil {
return 0, err
}
continue
}
// If the path exists we almost always just want to remove and replace it.
// The only exception is when it is a directory *and* the file from
// the layer is also a directory. Then we want to merge them (i.e.
// just apply the metadata from the layer).
if fi, err := os.Lstat(path); err == nil {
if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
if err := os.RemoveAll(path); err != nil {
return 0, err
}
}
}
srcData := io.Reader(tr)
srcHdr := hdr
// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
// we manually retarget these into the temporary files we extracted them into
if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), whiteoutLinkDir) {
linkBasename := filepath.Base(hdr.Linkname)
srcHdr = aufsHardlinks[linkBasename]
if srcHdr == nil {
return 0, fmt.Errorf("Invalid aufs hardlink")
}
p, err := fs.RootPath(aufsTempdir, linkBasename)
if err != nil {
return 0, err
}
tmpFile, err := os.Open(p)
if err != nil {
return 0, err
}
defer tmpFile.Close()
srcData = tmpFile
}
if err := createTarFile(ctx, path, root, srcHdr, srcData); err != nil {
return 0, err
}
// Directory mtimes must be handled at the end to avoid further
// file creation in them from modifying the directory mtime
if hdr.Typeflag == tar.TypeDir {
dirs = append(dirs, hdr)
}
unpackedPaths[path] = struct{}{}
}
for _, hdr := range dirs {
path, err := fs.RootPath(root, hdr.Name)
if err != nil {
return 0, err
}
if err := chtimes(path, boundTime(latestTime(hdr.AccessTime, hdr.ModTime)), boundTime(hdr.ModTime)); err != nil {
return 0, err
}
}
return size, nil
}
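// createTarFile creates a single filesystem entry at path from the tar header
// and its content, then restores ownership, xattrs, mode and timestamps.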
func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header, reader io.Reader) error {
// hdr.Mode is in linux format, which we can use for syscalls,
// but for os.Foo() calls we need the mode converted to os.FileMode,
// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
hdrInfo := hdr.FileInfo()
switch hdr.Typeflag {
case tar.TypeDir:
// Create directory unless it exists as a directory already.
// In that case we just want to merge the two
if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
if err := mkdir(path, hdrInfo.Mode()); err != nil {
return err
}
}
case tar.TypeReg, tar.TypeRegA:
file, err := openFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, hdrInfo.Mode())
if err != nil {
return err
}
_, err = copyBuffered(ctx, file, reader)
if err1 := file.Close(); err == nil {
err = err1
}
if err != nil {
return err
}
case tar.TypeBlock, tar.TypeChar:
// Handle this in an OS-specific way
if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
return err
}
case tar.TypeFifo:
// Handle this in an OS-specific way
if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
return err
}
case tar.TypeLink:
targetPath, err := fs.RootPath(extractDir, hdr.Linkname)
if err != nil {
return err
}
if err := os.Link(targetPath, path); err != nil {
return err
}
case tar.TypeSymlink:
if err := os.Symlink(hdr.Linkname, path); err != nil {
return err
}
case tar.TypeXGlobalHeader:
log.G(ctx).Debug("PAX Global Extended Headers found and ignored")
return nil
default:
return errors.Errorf("unhandled tar header type %d\n", hdr.Typeflag)
}
// Lchown is not supported on Windows.
if runtime.GOOS != "windows" {
if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil {
return err
}
}
for key, value := range hdr.PAXRecords {
if strings.HasPrefix(key, paxSchilyXattr) {
key = key[len(paxSchilyXattr):]
if err := setxattr(path, key, value); err != nil {
if errors.Cause(err) == syscall.ENOTSUP {
log.G(ctx).WithError(err).Warnf("ignored xattr %s in archive", key)
continue
}
return err
}
}
}
// There is no LChmod, so ignore mode for symlink. Also, this
// must happen after chown, as that can modify the file mode
if err := handleLChmod(hdr, path, hdrInfo); err != nil {
return err
}
return chtimes(path, boundTime(latestTime(hdr.AccessTime, hdr.ModTime)), boundTime(hdr.ModTime))
}
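// changeWriter converts filesystem changes into tar entries written to tw:
// deletions become whiteout files, and hardlinked files that share an inode
// are emitted once, with subsequent entries written as link headers.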
type changeWriter struct {
tw *tar.Writer
source string
whiteoutT time.Time
inodeSrc map[uint64]string
inodeRefs map[uint64][]string
addedDirs map[string]struct{}
}
func newChangeWriter(w io.Writer, source string) *changeWriter {
return &changeWriter{
tw: tar.NewWriter(w),
source: source,
whiteoutT: time.Now(),
inodeSrc: map[uint64]string{},
inodeRefs: map[uint64][]string{},
addedDirs: map[string]struct{}{},
}
}
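// HandleChange is the callback passed to fs.Changes; it writes one tar entry
// (or a whiteout entry for deletions) for each reported change.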
func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, err error) error {
if err != nil {
return err
}
if k == fs.ChangeKindDelete {
whiteOutDir := filepath.Dir(p)
whiteOutBase := filepath.Base(p)
whiteOut := filepath.Join(whiteOutDir, whiteoutPrefix+whiteOutBase)
hdr := &tar.Header{
Typeflag: tar.TypeReg,
Name: whiteOut[1:],
Size: 0,
ModTime: cw.whiteoutT,
AccessTime: cw.whiteoutT,
ChangeTime: cw.whiteoutT,
}
if err := cw.includeParents(hdr); err != nil {
return err
}
if err := cw.tw.WriteHeader(hdr); err != nil {
return errors.Wrap(err, "failed to write whiteout header")
}
} else {
var (
link string
err error
source = filepath.Join(cw.source, p)
)
switch {
case f.Mode()&os.ModeSocket != 0:
return nil // ignore sockets
case f.Mode()&os.ModeSymlink != 0:
if link, err = os.Readlink(source); err != nil {
return err
}
}
hdr, err := tar.FileInfoHeader(f, link)
if err != nil {
return err
}
hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
name := p
if strings.HasPrefix(name, string(filepath.Separator)) {
name, err = filepath.Rel(string(filepath.Separator), name)
if err != nil {
return errors.Wrap(err, "failed to make path relative")
}
}
name, err = tarName(name)
if err != nil {
return errors.Wrap(err, "cannot canonicalize path")
}
// suffix with '/' for directories
if f.IsDir() && !strings.HasSuffix(name, "/") {
name += "/"
}
hdr.Name = name
if err := setHeaderForSpecialDevice(hdr, name, f); err != nil {
return errors.Wrap(err, "failed to set device headers")
}
// additionalLinks stores file names which must be linked to
// this file when this file is added
var additionalLinks []string
inode, isHardlink := fs.GetLinkInfo(f)
if isHardlink {
// If the inode has a source, always link to it
if source, ok := cw.inodeSrc[inode]; ok {
hdr.Typeflag = tar.TypeLink
hdr.Linkname = source
hdr.Size = 0
} else {
if k == fs.ChangeKindUnmodified {
cw.inodeRefs[inode] = append(cw.inodeRefs[inode], name)
return nil
}
cw.inodeSrc[inode] = name
additionalLinks = cw.inodeRefs[inode]
delete(cw.inodeRefs, inode)
}
} else if k == fs.ChangeKindUnmodified {
// Nothing to write to diff
return nil
}
if capability, err := getxattr(source, "security.capability"); err != nil {
return errors.Wrap(err, "failed to get capabilities xattr")
} else if capability != nil {
if hdr.PAXRecords == nil {
hdr.PAXRecords = map[string]string{}
}
hdr.PAXRecords[paxSchilyXattr+"security.capability"] = string(capability)
}
if err := cw.includeParents(hdr); err != nil {
return err
}
if err := cw.tw.WriteHeader(hdr); err != nil {
return errors.Wrap(err, "failed to write file header")
}
if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
file, err := open(source)
if err != nil {
return errors.Wrapf(err, "failed to open path: %v", source)
}
defer file.Close()
n, err := copyBuffered(context.TODO(), cw.tw, file)
if err != nil {
return errors.Wrap(err, "failed to copy")
}
if n != hdr.Size {
return errors.New("short write copying file")
}
}
if additionalLinks != nil {
source = hdr.Name
for _, extra := range additionalLinks {
hdr.Name = extra
hdr.Typeflag = tar.TypeLink
hdr.Linkname = source
hdr.Size = 0
if err := cw.includeParents(hdr); err != nil {
return err
}
if err := cw.tw.WriteHeader(hdr); err != nil {
return errors.Wrap(err, "failed to write file header")
}
}
}
}
return nil
}
func (cw *changeWriter) Close() error {
if err := cw.tw.Close(); err != nil {
return errors.Wrap(err, "failed to close tar writer")
}
return nil
}
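// includeParents emits tar entries for any parent directories of hdr that have
// not been added yet, so extracted archives always contain their directories.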
func (cw *changeWriter) includeParents(hdr *tar.Header) error {
name := strings.TrimRight(hdr.Name, "/")
fname := filepath.Join(cw.source, name)
parent := filepath.Dir(name)
pname := filepath.Join(cw.source, parent)
// Do not include root directory as parent
if fname != cw.source && pname != cw.source {
_, ok := cw.addedDirs[parent]
if !ok {
cw.addedDirs[parent] = struct{}{}
fi, err := os.Stat(pname)
if err != nil {
return err
}
if err := cw.HandleChange(fs.ChangeKindModify, parent, fi, nil); err != nil {
return err
}
}
}
if hdr.Typeflag == tar.TypeDir {
cw.addedDirs[name] = struct{}{}
}
return nil
}
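// copyBuffered copies src to dst using a pooled buffer, checking the context
// for cancellation before each read.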
func copyBuffered(ctx context.Context, dst io.Writer, src io.Reader) (written int64, err error) {
buf := bufPool.Get().(*[]byte)
defer bufPool.Put(buf)
for {
select {
case <-ctx.Done():
err = ctx.Err()
return
default:
}
nr, er := src.Read(*buf)
if nr > 0 {
nw, ew := dst.Write((*buf)[0:nr])
if nw > 0 {
written += int64(nw)
}
if ew != nil {
err = ew
break
}
if nr != nw {
err = io.ErrShortWrite
break
}
}
if er != nil {
if er != io.EOF {
err = er
}
break
}
}
return written, err
}


@@ -0,0 +1,20 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package archive
// ApplyOpt allows setting mutable archive apply properties on creation
type ApplyOpt func(options *ApplyOptions) error


@@ -0,0 +1,23 @@
// +build !windows
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package archive
// ApplyOptions provides additional options for an Apply operation
type ApplyOptions struct {
}


@@ -0,0 +1,44 @@
// +build windows
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package archive
// ApplyOptions provides additional options for an Apply operation
type ApplyOptions struct {
ParentLayerPaths []string // Parent layer paths used for Windows layer apply
IsWindowsContainerLayer bool // True if the tar stream to be applied is a Windows Container Layer
}
// WithParentLayers adds parent layers to the apply process. This is required
// for all Windows layers except the base layer.
func WithParentLayers(parentPaths []string) ApplyOpt {
return func(options *ApplyOptions) error {
options.ParentLayerPaths = parentPaths
return nil
}
}
// AsWindowsContainerLayer indicates that the tar stream to apply is that of
// a Windows Container Layer. The caller must be holding SeBackupPrivilege and
// SeRestorePrivilege.
func AsWindowsContainerLayer() ApplyOpt {
return func(options *ApplyOptions) error {
options.IsWindowsContainerLayer = true
return nil
}
}

File diff suppressed because it is too large


@@ -0,0 +1,159 @@
// +build !windows
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package archive
import (
"archive/tar"
"context"
"os"
"sync"
"syscall"
"github.com/containerd/continuity/sysx"
"github.com/opencontainers/runc/libcontainer/system"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
)
func tarName(p string) (string, error) {
return p, nil
}
func chmodTarEntry(perm os.FileMode) os.FileMode {
return perm
}
func setHeaderForSpecialDevice(hdr *tar.Header, name string, fi os.FileInfo) error {
s, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return errors.New("unsupported stat type")
}
// Rdev is int32 on darwin/bsd, int64 on linux/solaris
rdev := uint64(s.Rdev) // nolint: unconvert
// Currently go does not fill in the major/minors
if s.Mode&syscall.S_IFBLK != 0 ||
s.Mode&syscall.S_IFCHR != 0 {
hdr.Devmajor = int64(unix.Major(rdev))
hdr.Devminor = int64(unix.Minor(rdev))
}
return nil
}
func open(p string) (*os.File, error) {
return os.Open(p)
}
func openFile(name string, flag int, perm os.FileMode) (*os.File, error) {
f, err := os.OpenFile(name, flag, perm)
if err != nil {
return nil, err
}
// Call chmod to avoid permission mask
if err := os.Chmod(name, perm); err != nil {
return nil, err
}
return f, err
}
func mkdirAll(path string, perm os.FileMode) error {
return os.MkdirAll(path, perm)
}
func mkdir(path string, perm os.FileMode) error {
if err := os.Mkdir(path, perm); err != nil {
return err
}
// Only final created directory gets explicit permission
// call to avoid permission mask
return os.Chmod(path, perm)
}
var (
inUserNS bool
nsOnce sync.Once
)
func setInUserNS() {
inUserNS = system.RunningInUserNS()
}
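// skipFile reports whether the entry cannot be created on this system, which
// is the case for block and character devices when running inside a user
// namespace.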
func skipFile(hdr *tar.Header) bool {
switch hdr.Typeflag {
case tar.TypeBlock, tar.TypeChar:
// cannot create a device if running in user namespace
nsOnce.Do(setInUserNS)
return inUserNS
default:
return false
}
}
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo.
// This function must not be called for Block and Char when running in userns.
// (skipFile() should return true for them.)
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
mode := uint32(hdr.Mode & 07777)
switch hdr.Typeflag {
case tar.TypeBlock:
mode |= unix.S_IFBLK
case tar.TypeChar:
mode |= unix.S_IFCHR
case tar.TypeFifo:
mode |= unix.S_IFIFO
}
return unix.Mknod(path, mode, int(unix.Mkdev(uint32(hdr.Devmajor), uint32(hdr.Devminor))))
}
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
if hdr.Typeflag == tar.TypeLink {
if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
return err
}
}
} else if hdr.Typeflag != tar.TypeSymlink {
if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
return err
}
}
return nil
}
func getxattr(path, attr string) ([]byte, error) {
b, err := sysx.LGetxattr(path, attr)
if err == unix.ENOTSUP || err == sysx.ENODATA {
return nil, nil
}
return b, err
}
func setxattr(path, key, value string) error {
return sysx.LSetxattr(path, key, []byte(value), 0)
}
// apply applies a tar stream of an OCI style diff tar.
// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets
func apply(ctx context.Context, root string, tr *tar.Reader, options ApplyOptions) (size int64, err error) {
return applyNaive(ctx, root, tr, options)
}


@@ -0,0 +1,449 @@
// +build windows
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package archive
import (
"archive/tar"
"bufio"
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"syscall"
"github.com/Microsoft/go-winio"
"github.com/Microsoft/hcsshim"
"github.com/containerd/containerd/sys"
)
const (
// MSWINDOWS pax vendor extensions
hdrMSWindowsPrefix = "MSWINDOWS."
hdrFileAttributes = hdrMSWindowsPrefix + "fileattr"
hdrSecurityDescriptor = hdrMSWindowsPrefix + "sd"
hdrRawSecurityDescriptor = hdrMSWindowsPrefix + "rawsd"
hdrMountPoint = hdrMSWindowsPrefix + "mountpoint"
hdrEaPrefix = hdrMSWindowsPrefix + "xattr."
// LIBARCHIVE pax vendor extensions
hdrLibArchivePrefix = "LIBARCHIVE."
hdrCreateTime = hdrLibArchivePrefix + "creationtime"
)
var (
// mutatedFiles is a list of files that are mutated by the import process
// and must be backed up and restored.
mutatedFiles = map[string]string{
"UtilityVM/Files/EFI/Microsoft/Boot/BCD": "bcd.bak",
"UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG": "bcd.log.bak",
"UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG1": "bcd.log1.bak",
"UtilityVM/Files/EFI/Microsoft/Boot/BCD.LOG2": "bcd.log2.bak",
}
)
// tarName converts the platform-specific relative path p to a canonical
// posix-style path for tar archival.
func tarName(p string) (string, error) {
// windows: convert windows style relative path with backslashes
// into forward slashes. Since windows does not allow '/' or '\'
// in file names, it is mostly safe to replace; however, we must
// check just in case.
if strings.Contains(p, "/") {
return "", fmt.Errorf("Windows path contains forward slash: %s", p)
}
return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
}
// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
perm &= 0755
// Add the x bit: make everything +x from windows
perm |= 0111
return perm
}
func setHeaderForSpecialDevice(*tar.Header, string, os.FileInfo) error {
// do nothing. no notion of Rdev, Inode, Nlink in stat on Windows
return nil
}
func open(p string) (*os.File, error) {
// We use sys.OpenSequential to ensure we use sequential file
// access on Windows to avoid depleting the standby list.
return sys.OpenSequential(p)
}
func openFile(name string, flag int, perm os.FileMode) (*os.File, error) {
// Source is regular file. We use sys.OpenFileSequential to use sequential
// file access to avoid depleting the standby list on Windows.
return sys.OpenFileSequential(name, flag, perm)
}
func mkdirAll(path string, perm os.FileMode) error {
return sys.MkdirAll(path, perm)
}
func mkdir(path string, perm os.FileMode) error {
return os.Mkdir(path, perm)
}
func skipFile(hdr *tar.Header) bool {
// Windows does not support filenames with colons in them. Ignore
// these files. This is not a problem though (although it might
// appear that it is). Let's suppose a client is running docker pull.
// The daemon it points to is Windows. Would it make sense for the
// client to be doing a docker pull Ubuntu for example (which has files
// with colons in the name under /usr/share/man/man3)? No, absolutely
// not as it would really only make sense that they were pulling a
// Windows image. However, for development, it is necessary to be able
// to pull Linux images which are in the repository.
//
// TODO Windows. Once the registry is aware of what images are Windows-
// specific or Linux-specific, this warning should be changed to an error
// to cater for the situation where someone does manage to upload a Linux
// image but have it tagged as Windows inadvertently.
if strings.Contains(hdr.Name, ":") {
return true
}
return false
}
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
return nil
}
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
return nil
}
func getxattr(path, attr string) ([]byte, error) {
return nil, nil
}
func setxattr(path, key, value string) error {
// Return not support error, do not wrap underlying not supported
// since xattrs should not exist in windows diff archives
return errors.New("xattrs not supported on Windows")
}
// apply applies a tar stream of an OCI style diff tar of a Windows layer.
// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets
func apply(ctx context.Context, root string, tr *tar.Reader, options ApplyOptions) (size int64, err error) {
if options.IsWindowsContainerLayer {
return applyWindowsLayer(ctx, root, tr, options)
}
return applyNaive(ctx, root, tr, options)
}
// applyWindowsLayer applies a tar stream of an OCI style diff tar of a Windows layer.
// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets
func applyWindowsLayer(ctx context.Context, root string, tr *tar.Reader, options ApplyOptions) (size int64, err error) {
home, id := filepath.Split(root)
info := hcsshim.DriverInfo{
HomeDir: home,
}
w, err := hcsshim.NewLayerWriter(info, id, options.ParentLayerPaths)
if err != nil {
return 0, err
}
defer func() {
if err2 := w.Close(); err2 != nil {
// This error should not be discarded as a failure here
// could result in an invalid layer on disk
if err == nil {
err = err2
}
}
}()
buf := bufio.NewWriter(nil)
hdr, nextErr := tr.Next()
// Iterate through the files in the archive.
for {
select {
case <-ctx.Done():
return 0, ctx.Err()
default:
}
if nextErr == io.EOF {
// end of tar archive
break
}
if nextErr != nil {
return 0, nextErr
}
// Note: path is used instead of filepath to prevent OS specific handling
// of the tar path
base := path.Base(hdr.Name)
if strings.HasPrefix(base, whiteoutPrefix) {
dir := path.Dir(hdr.Name)
originalBase := base[len(whiteoutPrefix):]
originalPath := path.Join(dir, originalBase)
if err := w.Remove(filepath.FromSlash(originalPath)); err != nil {
return 0, err
}
hdr, nextErr = tr.Next()
} else if hdr.Typeflag == tar.TypeLink {
err := w.AddLink(filepath.FromSlash(hdr.Name), filepath.FromSlash(hdr.Linkname))
if err != nil {
return 0, err
}
hdr, nextErr = tr.Next()
} else {
name, fileSize, fileInfo, err := fileInfoFromHeader(hdr)
if err != nil {
return 0, err
}
if err := w.Add(filepath.FromSlash(name), fileInfo); err != nil {
return 0, err
}
size += fileSize
hdr, nextErr = tarToBackupStreamWithMutatedFiles(buf, w, tr, hdr, root)
}
}
return
}
// fileInfoFromHeader retrieves basic Win32 file information from a tar header, using the additional metadata written by
// WriteTarFileFromBackupStream.
func fileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *winio.FileBasicInfo, err error) {
name = hdr.Name
if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
size = hdr.Size
}
fileInfo = &winio.FileBasicInfo{
LastAccessTime: syscall.NsecToFiletime(hdr.AccessTime.UnixNano()),
LastWriteTime: syscall.NsecToFiletime(hdr.ModTime.UnixNano()),
ChangeTime: syscall.NsecToFiletime(hdr.ChangeTime.UnixNano()),
// Default CreationTime to ModTime, updated below if LIBARCHIVE.creationtime exists
CreationTime: syscall.NsecToFiletime(hdr.ModTime.UnixNano()),
}
if attrStr, ok := hdr.PAXRecords[hdrFileAttributes]; ok {
attr, err := strconv.ParseUint(attrStr, 10, 32)
if err != nil {
return "", 0, nil, err
}
fileInfo.FileAttributes = uintptr(attr)
} else {
if hdr.Typeflag == tar.TypeDir {
fileInfo.FileAttributes |= syscall.FILE_ATTRIBUTE_DIRECTORY
}
}
if createStr, ok := hdr.PAXRecords[hdrCreateTime]; ok {
createTime, err := parsePAXTime(createStr)
if err != nil {
return "", 0, nil, err
}
fileInfo.CreationTime = syscall.NsecToFiletime(createTime.UnixNano())
}
return
}
// tarToBackupStreamWithMutatedFiles reads data from a tar stream and
// writes it to a backup stream, and also saves any files that will be mutated
// by the import layer process to a backup location.
func tarToBackupStreamWithMutatedFiles(buf *bufio.Writer, w io.Writer, t *tar.Reader, hdr *tar.Header, root string) (nextHdr *tar.Header, err error) {
var (
bcdBackup *os.File
bcdBackupWriter *winio.BackupFileWriter
)
if backupPath, ok := mutatedFiles[hdr.Name]; ok {
bcdBackup, err = os.Create(filepath.Join(root, backupPath))
if err != nil {
return nil, err
}
defer func() {
cerr := bcdBackup.Close()
if err == nil {
err = cerr
}
}()
bcdBackupWriter = winio.NewBackupFileWriter(bcdBackup, false)
defer func() {
cerr := bcdBackupWriter.Close()
if err == nil {
err = cerr
}
}()
buf.Reset(io.MultiWriter(w, bcdBackupWriter))
} else {
buf.Reset(w)
}
defer func() {
ferr := buf.Flush()
if err == nil {
err = ferr
}
}()
return writeBackupStreamFromTarFile(buf, t, hdr)
}
// writeBackupStreamFromTarFile writes a Win32 backup stream from the current tar file. Since this function may process multiple
// tar file entries in order to collect all the alternate data streams for the file, it returns the next
// tar file that was not processed, or io.EOF if there are no more.
func writeBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) {
bw := winio.NewBackupStreamWriter(w)
var sd []byte
var err error
// Maintaining old SDDL-based behavior for backward compatibility. All new tar headers written
// by this library will have raw binary for the security descriptor.
if sddl, ok := hdr.PAXRecords[hdrSecurityDescriptor]; ok {
sd, err = winio.SddlToSecurityDescriptor(sddl)
if err != nil {
return nil, err
}
}
if sdraw, ok := hdr.PAXRecords[hdrRawSecurityDescriptor]; ok {
sd, err = base64.StdEncoding.DecodeString(sdraw)
if err != nil {
return nil, err
}
}
if len(sd) != 0 {
bhdr := winio.BackupHeader{
Id: winio.BackupSecurity,
Size: int64(len(sd)),
}
err := bw.WriteHeader(&bhdr)
if err != nil {
return nil, err
}
_, err = bw.Write(sd)
if err != nil {
return nil, err
}
}
var eas []winio.ExtendedAttribute
for k, v := range hdr.PAXRecords {
if !strings.HasPrefix(k, hdrEaPrefix) {
continue
}
data, err := base64.StdEncoding.DecodeString(v)
if err != nil {
return nil, err
}
eas = append(eas, winio.ExtendedAttribute{
Name: k[len(hdrEaPrefix):],
Value: data,
})
}
if len(eas) != 0 {
eadata, err := winio.EncodeExtendedAttributes(eas)
if err != nil {
return nil, err
}
bhdr := winio.BackupHeader{
Id: winio.BackupEaData,
Size: int64(len(eadata)),
}
err = bw.WriteHeader(&bhdr)
if err != nil {
return nil, err
}
_, err = bw.Write(eadata)
if err != nil {
return nil, err
}
}
if hdr.Typeflag == tar.TypeSymlink {
_, isMountPoint := hdr.PAXRecords[hdrMountPoint]
rp := winio.ReparsePoint{
Target: filepath.FromSlash(hdr.Linkname),
IsMountPoint: isMountPoint,
}
reparse := winio.EncodeReparsePoint(&rp)
bhdr := winio.BackupHeader{
Id: winio.BackupReparseData,
Size: int64(len(reparse)),
}
err := bw.WriteHeader(&bhdr)
if err != nil {
return nil, err
}
_, err = bw.Write(reparse)
if err != nil {
return nil, err
}
}
buf := bufPool.Get().(*[]byte)
defer bufPool.Put(buf)
if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
bhdr := winio.BackupHeader{
Id: winio.BackupData,
Size: hdr.Size,
}
err := bw.WriteHeader(&bhdr)
if err != nil {
return nil, err
}
_, err = io.CopyBuffer(bw, t, *buf)
if err != nil {
return nil, err
}
}
// Copy all the alternate data streams and return the next non-ADS header.
for {
ahdr, err := t.Next()
if err != nil {
return nil, err
}
if ahdr.Typeflag != tar.TypeReg || !strings.HasPrefix(ahdr.Name, hdr.Name+":") {
return ahdr, nil
}
bhdr := winio.BackupHeader{
Id: winio.BackupAlternateData,
Size: ahdr.Size,
Name: ahdr.Name[len(hdr.Name):] + ":$DATA",
}
err = bw.WriteHeader(&bhdr)
if err != nil {
return nil, err
}
_, err = io.CopyBuffer(bw, t, *buf)
if err != nil {
return nil, err
}
}
}
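// An illustrative sketch (assumed, not from the upstream source) of the
// caller-side loop around writeBackupStreamFromTarFile: each call converts one
// file plus its alternate data streams and returns the next unprocessed header,
// so the loop simply runs until io.EOF. The destination writer w is assumed to
// be something like a winio.BackupFileWriter.
func exampleDrainBackupStreams(w io.Writer, tr *tar.Reader) error {
	hdr, err := tr.Next()
	for err == nil {
		hdr, err = writeBackupStreamFromTarFile(w, tr, hdr)
	}
	if err == io.EOF {
		return nil
	}
	return err
}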

View file

@ -0,0 +1,54 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package archive
import (
"syscall"
"time"
"unsafe"
)
var (
minTime = time.Unix(0, 0)
maxTime time.Time
)
func init() {
if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 {
// This is a 64 bit timespec
// os.Chtimes limits time to the following
maxTime = time.Unix(0, 1<<63-1)
} else {
// This is a 32 bit timespec
maxTime = time.Unix(1<<31-1, 0)
}
}
func boundTime(t time.Time) time.Time {
if t.Before(minTime) || t.After(maxTime) {
return minTime
}
return t
}
func latestTime(t1, t2 time.Time) time.Time {
if t1.Before(t2) {
return t2
}
return t1
}
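// An illustrative sketch (assumed, not from the upstream source) of how these
// helpers are meant to be combined: out-of-range timestamps are clamped before
// being applied to the filesystem. The concrete times are arbitrary examples.
func exampleBoundTime() {
	tooEarly := time.Unix(-1, 0)     // before minTime (the Unix epoch)
	ok := time.Unix(1500000000, 123) // within [minTime, maxTime]
	_ = boundTime(tooEarly)      // clamped to minTime
	_ = boundTime(ok)            // returned unchanged
	_ = latestTime(tooEarly, ok) // ok, the later of the two
}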

View file

@ -0,0 +1,30 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package archive
import (
"time"
"github.com/pkg/errors"
)
// As of macOS 10.12 there is apparently no way to set timestamps
// with nanosecond precision. We could fall back to utimes/lutimes
// and lose the precision as a temporary workaround.
func chtimes(path string, atime, mtime time.Time) error {
return errors.New("OSX missing UtimesNanoAt")
}

View file

@ -0,0 +1,39 @@
// +build linux freebsd solaris
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package archive
import (
"time"
"golang.org/x/sys/unix"
"github.com/pkg/errors"
)
func chtimes(path string, atime, mtime time.Time) error {
var utimes [2]unix.Timespec
utimes[0] = unix.NsecToTimespec(atime.UnixNano())
utimes[1] = unix.NsecToTimespec(mtime.UnixNano())
if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, utimes[0:], unix.AT_SYMLINK_NOFOLLOW); err != nil {
return errors.Wrap(err, "failed call to UtimesNanoAt")
}
return nil
}
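// An illustrative sketch (assumed, not from the upstream source): chtimes
// applies access and modification times without following symlinks, which is
// why it uses UtimesNanoAt with AT_SYMLINK_NOFOLLOW. The timestamps below are
// arbitrary examples.
func exampleChtimes(path string) error {
	atime := time.Unix(1500000000, 0)
	mtime := time.Unix(1500000001, 500)
	return chtimes(path, atime, mtime)
}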

View file

@ -0,0 +1,42 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package archive
import (
"time"
"golang.org/x/sys/windows"
)
// chtimes will set the create time on a file using the given modtime.
// This requires calling SetFileTime and explicitly including the create time.
func chtimes(path string, atime, mtime time.Time) error {
ctimespec := windows.NsecToTimespec(mtime.UnixNano())
pathp, e := windows.UTF16PtrFromString(path)
if e != nil {
return e
}
h, e := windows.CreateFile(pathp,
windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil,
windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0)
if e != nil {
return e
}
defer windows.Close(h)
c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec))
return windows.SetFileTime(h, &c, nil, nil)
}
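// An illustrative sketch (assumed, not from the upstream source): on Windows
// this chtimes only restores the creation time derived from mtime; the access
// and last-write times passed to SetFileTime are nil and left untouched, so
// the atime argument is effectively ignored.
func exampleChtimesWindows(path string) error {
	mtime := time.Unix(1500000000, 0) // arbitrary example timestamp
	return chtimes(path, mtime, mtime)
}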

View file

@ -1,3 +1,19 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package containerd
import (

vendor/github.com/containerd/containerd/cio/io.go (new vendored file, +226 lines)
View file

@ -0,0 +1,226 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cio
import (
"context"
"fmt"
"io"
"os"
"sync"
"github.com/containerd/containerd/defaults"
)
var bufPool = sync.Pool{
New: func() interface{} {
buffer := make([]byte, 32<<10)
return &buffer
},
}
// Config holds the IO configurations.
type Config struct {
// Terminal is true if one has been allocated
Terminal bool
// Stdin path
Stdin string
// Stdout path
Stdout string
// Stderr path
Stderr string
}
// IO holds the io information for a task or process
type IO interface {
// Config returns the IO configuration.
Config() Config
// Cancel aborts all current io operations.
Cancel()
// Wait blocks until all io copy operations have completed.
Wait()
// Close cleans up all open io resources. Cancel() is always called before
// Close()
Close() error
}
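// An illustrative sketch (assumed, not from the upstream source) of the
// teardown order implied by the comments above: wait for the copy routines,
// cancel anything still pending, then close the resources.
func exampleIOLifecycle(taskIO IO) error {
	taskIO.Wait()
	taskIO.Cancel()
	return taskIO.Close()
}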
// Creator creates new IO sets for a task
type Creator func(id string) (IO, error)
// Attach allows callers to reattach to running tasks
//
// There should only be one reader for a task's IO set
// because fifos can only be read by one reader, or the output
// will be sent only to the first reader
type Attach func(*FIFOSet) (IO, error)
// FIFOSet is a set of file paths to FIFOs for a task's standard IO streams
type FIFOSet struct {
Config
close func() error
}
// Close closes the FIFOSet
func (f *FIFOSet) Close() error {
if f.close != nil {
return f.close()
}
return nil
}
// NewFIFOSet returns a new FIFOSet from a Config and a close function
func NewFIFOSet(config Config, close func() error) *FIFOSet {
return &FIFOSet{Config: config, close: close}
}
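// An illustrative sketch (assumed, not from the upstream source): building a
// FIFOSet by hand with a cleanup closure that removes the FIFO directory on
// Close. The directory layout is hypothetical and a path/filepath import is
// assumed; NewFIFOSetInDir (defined per platform) is the usual way to do this.
func exampleNewFIFOSet(dir string) *FIFOSet {
	cfg := Config{
		Stdin:  filepath.Join(dir, "task-stdin"),
		Stdout: filepath.Join(dir, "task-stdout"),
		Stderr: filepath.Join(dir, "task-stderr"),
	}
	return NewFIFOSet(cfg, func() error { return os.RemoveAll(dir) })
}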
// Streams used to configure a Creator or Attach
type Streams struct {
Stdin io.Reader
Stdout io.Writer
Stderr io.Writer
Terminal bool
FIFODir string
}
// Opt customizes options used when creating a Creator or Attach
type Opt func(*Streams)
// WithStdio sets stream options to the standard input/output streams
func WithStdio(opt *Streams) {
WithStreams(os.Stdin, os.Stdout, os.Stderr)(opt)
}
// WithTerminal sets the terminal option
func WithTerminal(opt *Streams) {
opt.Terminal = true
}
// WithStreams sets the stream options to the specified Reader and Writers
func WithStreams(stdin io.Reader, stdout, stderr io.Writer) Opt {
return func(opt *Streams) {
opt.Stdin = stdin
opt.Stdout = stdout
opt.Stderr = stderr
}
}
// WithFIFODir sets the fifo directory.
// e.g. "/run/containerd/fifo", "/run/users/1001/containerd/fifo"
func WithFIFODir(dir string) Opt {
return func(opt *Streams) {
opt.FIFODir = dir
}
}
// NewCreator returns an IO creator from the options
func NewCreator(opts ...Opt) Creator {
streams := &Streams{}
for _, opt := range opts {
opt(streams)
}
if streams.FIFODir == "" {
streams.FIFODir = defaults.DefaultFIFODir
}
return func(id string) (IO, error) {
fifos, err := NewFIFOSetInDir(streams.FIFODir, id, streams.Terminal)
if err != nil {
return nil, err
}
return copyIO(fifos, streams)
}
}
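// An illustrative sketch (assumed, not from the upstream source): a Creator
// that wires a task's streams to this process's stdio using a caller-chosen
// FIFO directory, then invoked with a hypothetical task id.
func exampleNewCreator() (IO, error) {
	create := NewCreator(
		WithStdio,
		WithFIFODir("/tmp/containerd-fifos"), // assumed scratch directory
	)
	return create("example-task")
}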
// NewAttach attaches the existing io for a task to the provided io.Reader/Writers
func NewAttach(opts ...Opt) Attach {
streams := &Streams{}
for _, opt := range opts {
opt(streams)
}
return func(fifos *FIFOSet) (IO, error) {
if fifos == nil {
return nil, fmt.Errorf("cannot attach, missing fifos")
}
return copyIO(fifos, streams)
}
}
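// An illustrative sketch (assumed, not from the upstream source): reattaching
// this process's stdio to a running task whose FIFOSet was recovered elsewhere.
// As noted above, a task's FIFOs should only ever have a single reader.
func exampleNewAttach(fifos *FIFOSet) (IO, error) {
	attach := NewAttach(WithStdio)
	return attach(fifos)
}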
// NullIO redirects the container's IO into /dev/null
func NullIO(_ string) (IO, error) {
return &cio{}, nil
}
// cio is a basic container IO implementation.
type cio struct {
config Config
wg *sync.WaitGroup
closers []io.Closer
cancel context.CancelFunc
}
func (c *cio) Config() Config {
return c.config
}
func (c *cio) Wait() {
if c.wg != nil {
c.wg.Wait()
}
}
func (c *cio) Close() error {
var lastErr error
for _, closer := range c.closers {
if closer == nil {
continue
}
if err := closer.Close(); err != nil {
lastErr = err
}
}
return lastErr
}
func (c *cio) Cancel() {
if c.cancel != nil {
c.cancel()
}
}
type pipes struct {
Stdin io.WriteCloser
Stdout io.ReadCloser
Stderr io.ReadCloser
}
// DirectIO allows task IO to be handled externally by the caller
type DirectIO struct {
pipes
cio
}
var _ IO = &DirectIO{}
// Load loads the io for a container but does not attach.
//
// It allows io to be loaded on the task for deletion without
// starting the copy routines.
func Load(set *FIFOSet) (IO, error) {
return &cio{
config: set.Config,
closers: []io.Closer{set},
}, nil
}
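// An illustrative sketch (assumed, not from the upstream source): loading a
// task's io purely so its FIFOs can be cleaned up on delete, without starting
// the copy goroutines that NewCreator or NewAttach would.
func exampleLoad(fifos *FIFOSet) error {
	taskIO, err := Load(fifos)
	if err != nil {
		return err
	}
	return taskIO.Close() // closes the FIFOSet, releasing the FIFO files
}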

vendor/github.com/containerd/containerd/cio/io_test.go (new vendored file, +155 lines)
View file

@ -0,0 +1,155 @@
// +build !windows
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cio
import (
"bytes"
"context"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"syscall"
"testing"
"github.com/containerd/fifo"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/gotestyourself/gotestyourself/assert"
is "github.com/gotestyourself/gotestyourself/assert/cmp"
)
func assertHasPrefix(t *testing.T, s, prefix string) {
t.Helper()
if !strings.HasPrefix(s, prefix) {
t.Fatalf("expected %s to start with %s", s, prefix)
}
}
func TestNewFIFOSetInDir(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("NewFIFOSetInDir has different behaviour on windows")
}
root, err := ioutil.TempDir("", "test-new-fifo-set")
assert.NilError(t, err)
defer os.RemoveAll(root)
fifos, err := NewFIFOSetInDir(root, "theid", true)
assert.NilError(t, err)
dir := filepath.Dir(fifos.Stdin)
assertHasPrefix(t, dir, root)
expected := &FIFOSet{
Config: Config{
Stdin: filepath.Join(dir, "theid-stdin"),
Stdout: filepath.Join(dir, "theid-stdout"),
Stderr: filepath.Join(dir, "theid-stderr"),
Terminal: true,
},
}
assert.Assert(t, is.DeepEqual(fifos, expected, cmpFIFOSet))
files, err := ioutil.ReadDir(root)
assert.NilError(t, err)
assert.Check(t, is.Len(files, 1))
assert.NilError(t, fifos.Close())
files, err = ioutil.ReadDir(root)
assert.NilError(t, err)
assert.Check(t, is.Len(files, 0))
}
var cmpFIFOSet = cmpopts.IgnoreUnexported(FIFOSet{})
func TestNewAttach(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("setupFIFOProducers not yet implemented on windows")
}
var (
expectedStdin = "this is the stdin"
expectedStdout = "this is the stdout"
expectedStderr = "this is the stderr"
stdin = bytes.NewBufferString(expectedStdin)
stdout = new(bytes.Buffer)
stderr = new(bytes.Buffer)
)
withBytesBuffers := func(streams *Streams) {
*streams = Streams{Stdin: stdin, Stdout: stdout, Stderr: stderr}
}
attacher := NewAttach(withBytesBuffers)
fifos, err := NewFIFOSetInDir("", "theid", false)
assert.NilError(t, err)
io, err := attacher(fifos)
assert.NilError(t, err)
defer io.Close()
producers := setupFIFOProducers(t, io.Config())
initProducers(t, producers, expectedStdout, expectedStderr)
actualStdin, err := ioutil.ReadAll(producers.Stdin)
assert.NilError(t, err)
io.Wait()
io.Cancel()
assert.NilError(t, io.Close())
assert.Check(t, is.Equal(expectedStdout, stdout.String()))
assert.Check(t, is.Equal(expectedStderr, stderr.String()))
assert.Check(t, is.Equal(expectedStdin, string(actualStdin)))
}
type producers struct {
Stdin io.ReadCloser
Stdout io.WriteCloser
Stderr io.WriteCloser
}
func setupFIFOProducers(t *testing.T, fifos Config) producers {
var (
err error
pipes producers
ctx = context.Background()
)
pipes.Stdin, err = fifo.OpenFifo(ctx, fifos.Stdin, syscall.O_RDONLY, 0)
assert.NilError(t, err)
pipes.Stdout, err = fifo.OpenFifo(ctx, fifos.Stdout, syscall.O_WRONLY, 0)
assert.NilError(t, err)
pipes.Stderr, err = fifo.OpenFifo(ctx, fifos.Stderr, syscall.O_WRONLY, 0)
assert.NilError(t, err)
return pipes
}
func initProducers(t *testing.T, producers producers, stdout, stderr string) {
_, err := producers.Stdout.Write([]byte(stdout))
assert.NilError(t, err)
assert.NilError(t, producers.Stdout.Close())
_, err = producers.Stderr.Write([]byte(stderr))
assert.NilError(t, err)
assert.NilError(t, producers.Stderr.Close())
}

Some files were not shown because too many files have changed in this diff.